Secure Your Containers with Automated Backup scripts
The OG Docker GUI |
---|
- Portainer |
--
--
Media Servers |
---|
The Arrs All-In-One stack is highly recommended |
- THE ARRS ALL IN ONE |
- Jellyfin |
- Jellyseerr |
- Navidrome |
- Ampache |
- Mango epubs |
--
Security |
---|
- Authentik |
- Zitadel |
- Authelia 2FA |
- Vaultwarden |
- Changedetection |
- iSpy |
- UniFi Controller |
- UniFi Protect |
--
Monitoring |
---|
- Grafana |
- Prometheus |
- Uptime Kuma |
- Matomo |
- Librespeed |
- Netdata |
--
Networking |
---|
- Traefik |
- Nginx Proxy Manager |
- Unbound |
- Pihole |
- Netbootxyz |
- Duck DNS |
- Netbird |
- Wireguard |
- fail2ban |
- CrowdSec |
- Cloudflared |
- Teleport |
- Mailcow |
- Netbox |
--
Communication |
---|
- Matrix |
- Zulip |
- Jitsi |
- Rocket.Chat |
- Nextcloud |
- Workadventure |
- Invision Community |
- HumHub |
- Spacebar |
- Discord Bot |
- Habbo Hotel Retro |
--
Management |
---|
- Ansible Semaphore |
- Guacamole |
- Remotely |
- RustDesk |
- UpSnap |
- Pterodactyl |
- PufferPanel |
- Seafile |
- Webtop |
- Filebrowser |
- Home Assistant |
--
Productivity |
---|
- Dolibarr |
- Wiki.js |
- Nginx |
- Wordpress |
- Gitea |
- Gitlab |
- OpenProject |
- LinkWarden |
- LinkStack |
- Draw.io |
- Pwndrop |
- Snapdrop |
- Peppermint |
- UVDesk |
- GLPI |
- KASM |
- Whoogle |
- PrivateBin |
- Hastebin |
- Firefox |
- Mealie |
- Jenkins |
cd /home/myusername
mkdir docker && cd "$_"
mkdir portainer && cd "$_"
version: "3.8"
services:
  portainer:
    # Community Edition; for Business Edition use: portainer/portainer-ee:latest
    image: portainer/portainer-ce:latest
    container_name: portainer
    restart: always
    volumes:
      - ./data:/data
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "9443:9443"  # HTTPS
      - "9000:9000"  # Optional: required for accessing the UI over HTTP (legacy reasons)
      # - "8000:8000"  # Optional: required only for Edge Agents and Edge compute features
docker compose up -d
cd /home/myusername/docker/portainer
docker compose down
docker pull portainer/portainer-ce:latest
docker compose up -d
You can add the following link under Settings --> App Templates to get a lot of app templates
https://raw.githubusercontent.com/Qballjos/portainer_templates/master/Template/template.json
Old version: https://raw.githubusercontent.com/portainer/templates/master/templates-2.0.json
cd /home/myusername
mkdir scripts backups
cd scripts
nano portainer-backup.sh
#!/bin/bash
# Daily Portainer backup: copies the container's /data directory to a
# timestamped folder, compresses it, and prunes archives older than 7 days.
#
# Abort on any error so a failed `docker cp` can never produce an
# empty/partial archive that silently replaces a good backup.
set -euo pipefail

# Name of the running Portainer container to back up
CONTAINER_NAME="portainer"

# Directory that holds all backup archives
BACKUP_DIR="/home/myusername/backups/portainer"

# Timestamped working directory for this run
BACKUP_DATE=$(date +"%Y-%m-%d_%H-%M-%S")
BACKUP_PATH="$BACKUP_DIR/$BACKUP_DATE"
mkdir -p "$BACKUP_PATH"

# Copy the Portainer data directory from the container to the backup directory
docker cp "$CONTAINER_NAME":/data "$BACKUP_PATH"

# Compress the backup directory, then remove the uncompressed copy
tar -czf "$BACKUP_PATH.tar.gz" -C "$BACKUP_DIR" "$BACKUP_DATE"
rm -rf "$BACKUP_PATH"

# Prune old backups (keep the last 7 days)
find "$BACKUP_DIR" -name "*.tar.gz" -type f -mtime +7 -delete
sudo chmod +x portainer-backup.sh
Test the script
sudo ./portainer-backup.sh
Schedule the script to run every 24 hours using cron
sudo crontab -e
# This cron job runs at midnight every day (0 0 * * *)
# and executes the script located at /home/myusername/scripts
0 0 * * * /home/myusername/scripts/portainer-backup.sh
cd /home/myusername/docker
mkdir -p dashy/{public,icons}
cd dashy
version: "3.8"
services:
  dashy:
    image: lissy93/dashy:latest
    container_name: dashy
    restart: unless-stopped
    volumes:
      - ./public/conf.yml:/app/public/conf.yml
      - ./icons:/app/public/item-icons/icons
    ports:
      - "8100:80"
cd icons
git clone https://github.com/walkxcode/Dashboard-Icons.git
cd public
appConfig:
  theme: colorful
  layout: auto
  iconSize: medium
  language: en
pageInfo:
  title: Home Lab
  description: Welcome to your Home Lab!
  navLinks:
    - title: GitHub
      path: https://github.com/Lissy93/dashy
    - title: Documentation
      path: https://dashy.to/docs
  footerText: ''
sections:
  - name: Starter Only
    icon: fas fa-server
    items:
      - title: Google
        description: Search
        url: https://google.com
Edit Item example:
Item Text: Portainer
Description: Docker GUI
Icon: icons/dashboard-icons/png/portainer.png
Service URL: 192.168.1.50:9999
Opening Method: newtab
cd /home/myusername/docker
mkdir hpage && cd "$_"
version: "3.8"
services:
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: hpage
    restart: unless-stopped
    volumes:
      - ./config:/app/config
      - ./icons:/app/public/icons
      - ./images:/app/public/images  # (optional) for custom background images
      - /var/run/docker.sock:/var/run/docker.sock:ro  # (optional) for docker integrations
    ports:
      - "8101:3000"
docker compose up -d
https://gethomepage.dev/en/configs/services/
In authentik go to: Admin interface > Directory > Tokens and App passwords
Click on: Create
Insert the following info:
Identifier: homepage
Intent: API Token
User: admin
Description: API Token to display widget info of authentik.
Expiring: disabled
Click on: Copy token
Edit services.yaml and insert the following:
# services.yaml: a list of groups; each group maps to a list of services.
- Remote Management:
    - Authentik:
        href: http://AUTHENTIK_IP_HERE:9160
        description: Single Sign On (SSO)
        icon: authentik.png
        server: docker
        container: authentik
        widget:
          type: authentik
          url: https://portal.DOMAIN.COM
          key: api_token
cd /home/myusername/docker
mkdir homarr && cd "$_"
version: "3.8"
services:
  homarr:
    image: ghcr.io/ajnart/homarr:latest
    container_name: homarr
    restart: unless-stopped
    volumes:
      - ./configs:/app/data/configs
      - ./icons:/app/public/icons
      - /var/run/docker.sock:/var/run/docker.sock:ro
    ports:
      - "8102:7575"
docker compose up -d
cd /home/myusername/docker
mkdir authentik && cd "$_"
mkdir themes && touch ./themes/mytheme.css
sudo apt-get install -y pwgen
echo "PG_PASS=$(pwgen -s 40 1)" >> .env && echo "AUTHENTIK_SECRET_KEY=$(pwgen -s 50 1)" >> .env && echo "AUTHENTIK_ERROR_REPORTING__ENABLED=true" >> .env && echo "COMPOSE_PORT_HTTP=9160" >> .env && echo "COMPOSE_PORT_HTTPS=9161" >> .env
version: "3.8"
services:
  db:
    image: docker.io/library/postgres:12-alpine
    container_name: authentik_postgresql
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 5s
    volumes:
      - ./db:/var/lib/postgresql/data
    environment:
      POSTGRES_PASSWORD: ${PG_PASS:?database password required}
      POSTGRES_USER: ${PG_USER:-authentik}
      POSTGRES_DB: ${PG_DB:-authentik}
    env_file:
      - .env
  redis:
    image: docker.io/library/redis:alpine
    container_name: authentik_redis
    restart: unless-stopped
    command: --save 60 1 --loglevel warning
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
      start_period: 20s
      interval: 30s
      retries: 5
      timeout: 3s
    volumes:
      - ./redis:/data
  server:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-latest}
    container_name: authentik
    restart: unless-stopped
    command: server
    environment:
      AUTHENTIK_REDIS__HOST: redis
      # Must be a name that resolves on the compose network: the database
      # service is called "db" (its container_name is "authentik_postgresql"),
      # so the previous value "postgresql" would not resolve.
      AUTHENTIK_POSTGRESQL__HOST: db
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    volumes:
      - ./media:/media
      - ./custom-templates:/templates
      - ./themes/mytheme.css:/web/dist/custom.css
    env_file:
      - .env
    ports:
      - "${COMPOSE_PORT_HTTP:-9000}:9000"
      - "${COMPOSE_PORT_HTTPS:-9443}:9443"
    labels:
      traefik.enable: true
      traefik.http.routers.authentik.entryPoints: https
      traefik.http.routers.authentik.rule: Host(`portal.DOMAIN.COM`) || HostRegexp(`{subdomain:[a-z0-9]+}.DOMAIN.COM`) && PathPrefix(`/outpost.goauthentik.io/`)
    depends_on:
      - db
      - redis
  worker:
    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-latest}
    container_name: authentik_worker
    restart: unless-stopped
    command: worker
    environment:
      AUTHENTIK_REDIS__HOST: redis
      # Keep in sync with the server service above: database service name "db".
      AUTHENTIK_POSTGRESQL__HOST: db
      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
    # root is required so the worker can use the mounted docker socket
    user: root
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./media:/media
      - ./certs:/certs
      - ./custom-templates:/templates
    env_file:
      - .env
    depends_on:
      - db
      - redis
docker compose up -d
Create admin account at: https://&lt;SERVER-IP-HERE&gt;/if/flow/initial-setup/
Go to: Admin interface > Applications > Providers
Click on: Create
Select: Proxy Provider then click Next
Insert the following settings
Name: APPNAMEHERE - Forward Auth
Authentication flow: welcome-back (Login page)
Authorization flow: default-provider-authorization-explicit-consent (Authorize Application)
Forward auth (single application)
External host: https://APPNAMEHERE.DOMAIN.COM
Token validity: hours=24
Unauthenticated Paths:
^/api/.*
^/api2/.*
^/identity/.*
^/triggers/.*
^/meshagents.*
^/meshsettings.*
^/agent.*
^/control.*
^/meshrelay.*
^/ui.*
Go to: Admin interface > Applications > Applications
Click on: Create
Insert the following settings
Name: APPNAMEHERE
Slug: APPNAMEHERE
Provider: APPNAMEHERE - Forward Auth
Policy engine mode: any
Choose a app icon in: UI settings > Icon
Go to: Admin interface > Applications > Outposts
Click on: Edit
Insert the following settings
Name: authentik Embedded Outpost
Type: Proxy
Integration: Local Docker connection
Application: APPNAMEHERE (APPNAMEHERE - Forward Auth)
Configuration:
log_level: info
docker_labels: null
authentik_host: https://portal.DOMAIN.COM
docker_network: null
container_image: null
docker_map_ports: true
kubernetes_replicas: 1
kubernetes_namespace: default
authentik_host_browser: ""
object_naming_template: ak-outpost-%(name)s
authentik_host_insecure: false
kubernetes_service_type: ClusterIP
kubernetes_image_pull_secrets: []
kubernetes_ingress_class_name: null
kubernetes_disabled_components:
  - deployment
  - secret
kubernetes_ingress_annotations: {}
kubernetes_ingress_secret_name: authentik-outpost-tls
Go to: Admin interface > System > Brands > Edit > Other global settings > Attributes
settings:
  theme:
    base: dark  # dark / light / automatic
    # Raw CSS applied to the page background (folded block scalar:
    # the lines below become one long CSS string)
    background: >
      margin: 0;
      padding: 0;
      background-image: url("https://wallpaperaccess.com/full/8351153.gif");
      background-size: cover;
      background-position: center;
      background-repeat: no-repeat;
      background-attachment: fixed;
      background-blend-mode: multiply;
      background-color: #454545;
  enabledFeatures:
    settings: true
    apiDrawer: true
    applicationEdit: true
    notificationDrawer: true
Go to: Admin interface > Directory > Users > (select user) > Edit > Attributes
settings:
  theme:
    base: dark  # dark / light / automatic
    # Raw CSS applied to the page background (folded block scalar:
    # the lines below become one long CSS string)
    background: >
      margin: 0;
      padding: 0;
      background-image: url("https://wallpaperaccess.com/full/8351153.gif");
      background-size: cover;
      background-position: center;
      background-repeat: no-repeat;
      background-attachment: fixed;
      background-blend-mode: multiply;
      background-color: #454545;
  enabledFeatures:
    settings: true
    apiDrawer: true
    applicationEdit: true
    notificationDrawer: true
Upload the file to:
cd /home/myusername/docker/authentik/themes/mytheme.css
/* LOGIN WINDOW */
/* Frosted-glass login card with layered drop/inset shadows */
.pf-c-login__main {
  overflow: auto;
  position: relative;
  border-radius: 12px;
  background-color: rgba(28, 30, 33, 0.8);
  border: 1px solid rgba(100, 100, 100, 0.5);
  border-left: 1px solid rgba(30, 30, 30, 0.5);
  border-right: 1px solid rgba(30, 30, 30, 0.5);
  border-bottom-color: rgba(200, 200, 200, 0.5);
  backdrop-filter: blur(10px);
  box-shadow:
    0px 24px 38px 3px rgba(0, 0, 0, 0.75),
    inset 0px 24px 48px 0 rgba(0, 0, 0, 0.5),
    inset 0 1px 0px 0 rgba(0, 0, 0, 0.5),
    0 1px 0px 0 rgba(0, 0, 0, 0.8),
    inset 0 -1px 0 0 rgba(255, 255, 255, 0.1),
    0px -1px 0px 0 rgba(255, 255, 255, 0.5);
}

/* Radial glow at the top of the login card */
.pf-c-login__main::before {
  content: "";
  position: absolute;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  height: 400px;
  background: radial-gradient(
    circle at 50% 0%,
    rgba(32, 63, 109, 1) 5%,
    rgba(28, 30, 33, 0) 50%
  );
  pointer-events: none;
  z-index: -1;
  max-height: 100%;
}

/* optional image at the bottom */
/* .pf-c-login__main::after {
  content: "";
  display: block;
  position: relative;
  margin-top: -60px;
  width: 100%;
  height: 150px;
  background-image: url("");
  background-size: contain;
  background-position: center;
  background-repeat: no-repeat;
} */

/* Text inputs inside the login form */
.pf-c-login__main-body * input {
  --leftright-shadow: inset 1px 0 0 0 rgba(0, 0, 0, 0.5), inset -1px 0 0 0 rgba(0, 0, 0, 0.5);
  /* background-color: rgba(100,100,100, 0.5); */
  /* background: linear-gradient(to bottom,
    rgba(60,60,60, 0.5),
    rgba(100,100,100, 0.5)
  ); */
  border: none;
  /* border-width: 1px !important;
  border-top-color: rgba(0,0,0, 0.5);
  border-bottom-color: rgba(200,200,200, 0.5); */
  background-color: rgba(0, 0, 0, 0.5);
  border-radius: 5px;
  color: white;
  box-shadow:
    0 -1px 0 0 rgba(255, 255, 255, 0.5),
    0 1px 0 0 rgba(0, 0, 0, 0.5),
    inset 0 -1px 0 0 rgba(255, 255, 255, 0.3),
    inset 0 1px 0 0 rgba(0, 0, 0, 0.3),
    var(--leftright-shadow),
    0 0 1px 1px rgba(0, 0, 0, 0.3);
  outline: 0px solid transparent;
  outline-offset: -1px;
  transition: box-shadow 0.1s linear, outline 0.1s linear;
  padding: 8px !important;
}

.pf-c-login__main-body * input:hover {
  box-shadow:
    0 -1px 0 0 rgba(255, 255, 255, 0.1),
    0 1px 0 0 rgba(0, 0, 0, 0.5),
    inset 0 -1px 0 0 rgba(255, 255, 255, 0.3),
    inset 0 1px 0 0 rgba(0, 0, 0, 0.3),
    var(--leftright-shadow),
    0 0 8px 2px rgba(0, 0, 0, 0.3);
}

.pf-c-login__main-body * input:focus {
  outline: 2px solid rgba(255, 255, 255, 0.5);
  box-shadow:
    0 -1px 0 0 rgba(255, 255, 255, 0.1),
    0 1px 0 0 rgba(0, 0, 0, 0.5),
    inset 0 -1px 0 0 rgba(255, 255, 255, 0.3),
    inset 0 1px 0 0 rgba(0, 0, 0, 0.3),
    var(--leftright-shadow),
    0 0 4px 4px rgba(200, 200, 200, 0.5);
}

.pf-c-login__footer {
  padding: 0px;
}

/* Footer link list styled to match the login card */
.pf-c-login__footer ul.pf-c-list.pf-m-inline {
  padding: 0px;
  border-radius: 12px;
  background-color: rgba(28, 30, 33, 0.8);
  border: 1px solid rgba(100, 100, 100, 0.5);
  border-left: 1px solid rgba(30, 30, 30, 0.5);
  border-right: 1px solid rgba(30, 30, 30, 0.5);
  border-bottom-color: rgba(200, 200, 200, 0.5);
  backdrop-filter: blur(10px);
  box-shadow:
    inset 0px 24px 48px 0 rgba(0, 0, 0, 0.5),
    inset 0 1px 0px 0 rgba(0, 0, 0, 0.5),
    0 1px 0px 0 rgba(0, 0, 0, 0.8),
    inset 0 -1px 0 0 rgba(255, 255, 255, 0.1),
    0px -1px 0px 0 rgba(255, 255, 255, 0.5);
}
/* some other css */
Go to Admin interface --> Flows &amp; Stages --> Stages
Select default-authentication-mfa-validation then click on Edit
Select TOTP Authenticators then scroll down and select default-authenticator-totp-setup (TOTP Authenticator Setup Stage)
For Not configured action, select: Force the user to configure an authenticator
Click on Update
Go to Admin interface --> Directory --> Groups
Click on Create
Name: authentik Users
Attributes:
settings:
  enabledFeatures:
    settings: true
    apiDrawer: false
    applicationEdit: false
    notificationDrawer: true
Then click Create
Go to Admin interface --> Applications --> Applications
Click on an application and select the Policy / Group / User Bindings tab
Click on Create Binding
Select the tab Group and select your new group (authentik Users) then click Create
Go to Admin interface --> Directory --> Groups
Click on the group (authentik Users)
Select the tab Users and then click on Add existing user
If you use Cloudflare:
Make sure to set SSL/TLS to FULL
cd /home/myusername/docker
mkdir traefik-crowdsec && cd "$_"
mkdir traefik-data && cd "$_"
touch acme.json && chmod 600 acme.json
global:
  checkNewVersion: true
  sendAnonymousUsage: false

serversTransport:
  # Trust self-signed certificates on backend services
  insecureSkipVerify: true

entryPoints:
  # Not used in apps, but redirects everything from HTTP to HTTPS
  http:
    address: :80
    forwardedHeaders:
      trustedIPs: &trustedIps
        # Start of Cloudflare public IP list for HTTP requests, remove this if you don't use it
        - 173.245.48.0/20
        - 103.21.244.0/22
        - 103.22.200.0/22
        - 103.31.4.0/22
        - 141.101.64.0/18
        - 108.162.192.0/18
        - 190.93.240.0/20
        - 188.114.96.0/20
        - 197.234.240.0/22
        - 198.41.128.0/17
        - 162.158.0.0/15
        - 104.16.0.0/12
        - 172.64.0.0/13
        - 131.0.72.0/22
        - 2400:cb00::/32
        - 2606:4700::/32
        - 2803:f800::/32
        - 2405:b500::/32
        - 2405:8100::/32
        - 2a06:98c0::/29
        - 2c0f:f248::/32
        # End of Cloudflare public IP list
    http:
      redirections:
        entryPoint:
          to: https
          scheme: https

  # HTTPS endpoint, with domain wildcard
  https:
    address: :443
    forwardedHeaders:
      # Reuse list of Cloudflare trusted IPs above for HTTPS requests
      trustedIPs: *trustedIps
    http:
      tls:
        # Generate a wildcard domain certificate
        certResolver: cloudflare
        domains:
          # - main: "local.DOMAIN.COM"   # uncomment to enable certs for internal domain DNS
          #   sans:                      # uncomment to enable certs for internal domain DNS
          #     - "*.local.DOMAIN.COM"   # uncomment to enable certs for internal domain DNS
          - main: "DOMAIN.COM"
            sans:
              - "*.DOMAIN.COM"
      middlewares:
        - securityHeaders@file
        - crowdsec-bouncer@file
        - gzip@file
        - cloudflarewarp@file

  # Dedicated entrypoint for Prometheus metrics and the ping endpoint
  metrics:
    address: ":8083"

providers:
  providersThrottleDuration: 2s
  # File provider for connecting things that are outside of docker / defining middleware
  file:
    filename: /etc/traefik/fileConfig.yml
    watch: true
  # Docker provider for connecting all apps that are inside of the docker network
  docker:
    watch: true
    network: proxy  # add your docker network name here
    # Default host rule to containername.domain.example
    defaultRule: "Host(`{{ index .Labels \"com.docker.compose.service\"}}.DOMAIN.COM`)"
    # swarmModeRefreshSeconds: 15s  # enable if swarm is used https://doc.traefik.io/traefik/master/migration/v2-to-v3/
    exposedByDefault: false
    endpoint: "unix:///var/run/docker.sock"

# Enable traefik ui
api:
  dashboard: true
  insecure: true  # WARNING: serves the API/dashboard unauthenticated on :8080 — disable for production
  debug: true

metrics:
  prometheus:
    addEntryPointsLabels: true
    addRoutersLabels: true
    addServicesLabels: true
    entryPoint: metrics

# Health check endpoint: :8083/ping
ping:
  entryPoint: metrics

log:
  level: INFO  # DEBUG, INFO, WARN, ERROR, FATAL, PANIC
  filePath: "/logs/traefik.log"
  format: json

accessLog:
  # CrowdSec reads this file — keep the path in sync with acquis.yaml
  filePath: "/var/log/crowdsec/traefik_access.log"
  bufferingSize: 50
  format: json
  filters:
    statusCodes:
      - "400-404"
      - "300-302"
      - "500"
    retryAttempts: true
    minDuration: "10ms"
  # fields:
  #   defaultMode: foobar
  #   names:
  #     name0: foobar
  #     name1: foobar
  #   headers:
  #     defaultMode: foobar
  #     names:
  #       name0: foobar
  #       name1: foobar

# Use letsencrypt to generate ssl certificates
certificatesResolvers:
  cloudflare:
    acme:
      email: [email protected]  # change to your provider account email address
      storage: /etc/traefik/acme.json
      caServer: https://acme-v02.api.letsencrypt.org/directory  # prod (default)
      # caServer: https://acme-staging-v02.api.letsencrypt.org/directory  # staging
      dnsChallenge:
        provider: cloudflare
        # disablePropagationCheck: true  # uncomment if you have issues pulling certificates through cloudflare; disables waiting for TXT record propagation to all authoritative name servers
        # delayBeforeCheck: 60s  # uncomment along with disablePropagationCheck if needed to ensure the TXT record is ready before verification is attempted
        resolvers:
          - "1.1.1.1:53"
          - "1.0.0.1:53"

# Plugins (optional)
# Real IP from Cloudflare Proxy/Tunnel
experimental:
  plugins:
    cloudflarewarp:
      moduleName: "github.com/BetterCorp/cloudflarewarp"
      version: "v1.3.3"
http:
  ## EXTERNAL ROUTING EXAMPLE - Only use if you want to proxy something manually ##
  routers:
    # traefik dashboard router
    traefik:
      entryPoints:
        - https
      rule: "Host(`traefik.DOMAIN.COM`) || Host(`traefik.local.DOMAIN.COM`)"
      service: api@internal
      middlewares:
        - traefikAuth
    # myservice router
    myservice:
      entryPoints:
        - https
      rule: 'Host(`myservice.DOMAIN.COM`)'
      service: myservice
      # middlewares:
      #   - "auth"
    # myservice2 router
    myservice2:
      entryPoints:
        - https
      rule: 'Host(`myservice2.DOMAIN.COM`)'
      service: myservice2
      # middlewares:
      #   - "auth"

  ##======================SERVICES======================##
  # Define the base URL for srv-prod-1
  # srv_prod_1_url: &srv_prod_1_url http://192.168.x.x
  services:
    # service example
    myservice:
      loadBalancer:
        servers:
          - url: http://192.168.x.x:xxxx/
    myservice2:
      loadBalancer:
        servers:
          - url: http://192.168.x.x:xxxx/

  ##======================MIDDLEWARES======================##
  middlewares:
    # Serve friendly error pages for error status codes.
    # NOTE(review): "error-pages" must resolve to a Traefik service
    # (e.g. exposed via the docker provider) — confirm before enabling.
    error-pages-mw:
      errors:
        status:
          - "400-599"
        service: error-pages
        query: "/{status}.html"
    # Only allow local networks
    ip-allowlist:
      ipAllowList:
        sourceRange:
          - 127.0.0.1/32  # localhost
    # Only allow traffic through Authentik
    auth:
      forwardAuth:
        address: http://authentik:9000/outpost.goauthentik.io/auth/traefik
        trustForwardHeader: true
        authResponseHeaders:
          - X-authentik-username
          - X-authentik-groups
          - X-authentik-email
          - X-authentik-name
          - X-authentik-uid
          - X-authentik-jwt
          - X-authentik-meta-jwks
          - X-authentik-meta-outpost
          - X-authentik-meta-provider
          - X-authentik-meta-app
          - X-authentik-meta-version
    # Auth for traefik dashboard. Username & password = admin
    # Generate new users by: "apt install apache2-utils" and then "htpasswd -nb username password"
    traefikAuth:
      basicAuth:
        users:
          - "admin:$apr1$2fcluobe$vf5Hcx2yoJAdPz6cwYs.s."
    # Crowdsec bouncer
    crowdsec-bouncer:
      forwardAuth:
        address: http://bouncer-traefik:8080/api/v1/forwardAuth
        trustForwardHeader: true
    # Plugins (optional)
    # Real IP from Cloudflare Proxy/Tunnel
    cloudflarewarp:
      plugin:
        cloudflarewarp:
          disableDefault: false
          trustip:  # trusted IPs not required if disableDefault is false — Cloudflare IPs are allocated automatically
            - "2400:cb00::/32"
    securityHeaders:
      headers:
        customResponseHeaders:
          X-Robots-Tag: "none,noarchive,nosnippet,notranslate,noimageindex"  # prevents search engines from indexing the page
          X-Forwarded-Proto: "https"  # indicates that the original request was sent over HTTPS
          server: ""  # hides the server version and type by sending an empty Server header
        sslProxyHeaders:
          X-Forwarded-Proto: https  # indicates to the backend that the original request was sent over HTTPS
        referrerPolicy: "same-origin"  # sends referrer information only within the same origin
        hostsProxyHeaders:
          - "X-Forwarded-Host"  # original host requested by the client when proxies are used
        customRequestHeaders:
          X-Forwarded-Proto: "https"
        contentTypeNosniff: true  # prevents browsers from interpreting files as a different MIME type
        browserXssFilter: true  # enables the cross-site scripting (XSS) filter in browsers
        forceSTSHeader: true  # forces browsers to use HTTPS
        stsIncludeSubdomains: true  # applies the STS policy to all subdomains
        stsSeconds: 63072000  # duration (in seconds) for which the STS policy is in effect
        stsPreload: true  # intention to be included in browsers' preloaded HSTS lists
    gzip:
      compress: {}  # enables gzip compression for responses to reduce bandwidth

# Only use secure ciphers - https://ssl-config.mozilla.org/#server=traefik&version=2.6.0&config=intermediate&guideline=5.6
tls:
  options:
    default:
      minVersion: VersionTLS12
      cipherSuites:
        - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
        - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
cd ..
mkdir crowdsec-logs && cd "$_"
nano acquis.yaml
filenames:
  # Must match accessLog.filePath in traefik.yml — the original pointed at
  # /var/log/crowdsec/traefik.log, which traefik never writes.
  - /var/log/crowdsec/traefik_access.log
labels:
  type: traefik
---
filenames:
  - /var/log/auth.log
labels:
  type: syslog
cd ..
mkdir crowdsec-config && cd "$_"
touch acquis.yaml && chmod 600 acquis.yaml
cd ..
version: "3.8"
services:
  crowdsec:
    image: crowdsecurity/crowdsec:latest
    container_name: traefik_crowdsec
    # restart: unless-stopped
    environment:
      GID: "${GID-1000}"
      COLLECTIONS: "crowdsecurity/linux crowdsecurity/traefik"
    volumes:
      # NOTE(review): the next mount is redundant — acquis.yaml already lives
      # inside ./crowdsec-config, which is mounted on the line after it.
      - ./crowdsec-config/acquis.yaml:/etc/crowdsec/acquis.yaml
      - ./crowdsec-config:/etc/crowdsec/
      - ./crowdsec-db:/var/lib/crowdsec/data/
      # - ./traefik-logs:/var/log/traefik/:ro
    ports:
      - "6060:6060"
    networks:
      - management
    depends_on:
      - traefik
  bouncer-traefik:
    image: docker.io/fbonalair/traefik-crowdsec-bouncer:latest
    container_name: traefik_crowdsec_bouncher
    restart: unless-stopped
    environment:
      # Generate with: docker exec traefik_crowdsec cscli bouncers add bouncer-traefik
      CROWDSEC_BOUNCER_API_KEY: YOUR_SUPER_SECURE_CROWDSEC_BOUNCER_API_TOKEN
      CROWDSEC_AGENT_HOST: crowdsec:8080
      GIN_MODE: release
    networks:
      - management
    depends_on:
      - crowdsec
  traefik:
    image: traefik:latest
    container_name: traefik
    hostname: traefik
    restart: unless-stopped
    security_opt:
      - no-new-privileges:true
    ports:
      - "80:80"  # local http
      # - "81:81"  # external http
      - "443:443"  # local https
      # - "444:444"  # external https
      # - "443:443/tcp"  # uncomment if you want HTTP3
      # - "443:443/udp"  # uncomment if you want HTTP3
      # - "8192:8080"  # uncomment to enable dashboard — NOT RECOMMENDED FOR PRODUCTION
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./traefik-data:/etc/traefik
      # - /home/axdocker/docker/grafana/fluent/logs:/logs  # UNCOMMENT FOR GRAFANA LOGGING OF TRAEFIK
      # - /home/axdocker/docker/grafana/fluent/logs:/var/log/crowdsec  # UNCOMMENT FOR GRAFANA LOGGING OF TRAEFIK
      - /var/run/docker.sock:/var/run/docker.sock  # disable this when you enable dockersocket
    environment:
      # DOCKER_HOST: dockersocket
      CF_API_EMAIL: [email protected]
      CF_DNS_API_TOKEN: YOUR_SUPER_SECURE_CLOUDFLARE_API_TOKEN
    networks:
      - management
    # depends_on:
    #   - dockersocket
  # dockersocket:
  #   image: tecnativa/docker-socket-proxy
  #   container_name: dockersocket
  #   restart: unless-stopped
  #   volumes:
  #     - /var/run/docker.sock:/var/run/docker.sock
  #   networks:
  #     - management
  #   environment:
  #     CONTAINERS: 1
  #     POST: 0
  #   privileged: true
  error-pages:
    image: ghcr.io/tarampampam/error-pages:latest
    container_name: error-pages
    restart: unless-stopped
    environment:
      TEMPLATE_NAME: connection  # set the error pages template
    ports:
      - "8176:8080"
    # labels:
    #   traefik.enable: true
    #   traefik.http.routers.error-pages-router.rule: HostRegexp(`{host:.+}`)
    #   traefik.http.routers.error-pages-router.priority: 1
    #   traefik.http.routers.error-pages-router.entrypoints: web
    #   traefik.http.routers.error-pages-router.middlewares: error-pages-middleware
    #   traefik.http.middlewares.error-pages-middleware.errors.status: 400-599
    #   traefik.http.middlewares.error-pages-middleware.errors.service: error-pages-service
    #   traefik.http.middlewares.error-pages-middleware.errors.query: /{status}/
    #   traefik.http.services.error-pages-service.loadbalancer.server.port: 8175
    depends_on:
      - traefik
    networks:
      - management
networks:
  management:
    # NOTE(review): "driver" is ignored for external networks — the network
    # must already exist (docker network create management). Confirm intent.
    driver: bridge
    external: true
docker compose up -d
docker exec traefik_crowdsec cscli bouncers add bouncer-traefik
YOUR_SUPER_SECURE_CROWDSEC_BOUNCER_API_TOKEN
Use the template Edit zone DNS.
Add the following settings
Then click Continue to Summary and then Create Token.
YOUR_SUPER_SECURE_CLOUDFLARE_API_TOKEN
docker compose up -d
Visit the cloudflare DNS records and use the following setup
A traefik YOUR_PUBLIC_IP Proxied Auto
CNAME Records (pointing to Traefik domain):
CNAME myservice traefik.DOMAIN.COM Proxied Auto
CNAME myservice02 traefik.DOMAIN.COM Proxied Auto
CNAME myservice03 traefik.DOMAIN.COM Proxied Auto
CNAME myservice04 traefik.DOMAIN.COM Proxied Auto
CNAME myservice05 traefik.DOMAIN.COM Proxied Auto
docker exec traefik_crowdsec cscli decisions add --ip 192.168.123.123 --duration 24h --reason "web bruteforce"
docker exec traefik_crowdsec cscli decisions list
docker exec traefik_crowdsec cscli decisions list
docker exec traefik_crowdsec cscli decisions delete --ip 192.168.123.123
cd /home/myusername/docker/traefik-crowdsec/crowdsec-config
nano profiles.yaml
Uncomment (remove the hashtag) the following section:
#notifications:
# - http_default
cd notifications
nano http.yaml
type: http # Don't change
name: http_default # Must match the registered plugin in the profile
# One of "trace", "debug", "info", "warn", "error", "off"
log_level: info
# group_wait: # Time to wait collecting alerts before relaying a message to this plugin, eg "30s"
# group_threshold: # Amount of alerts that triggers a message before <group_wait> has expired, eg "10"
# max_retry: # Number of attempts to relay messages to plugins in case of error
# timeout: # Time to wait for response from the plugin before considering the attempt a failure, eg "10s"
#-------------------------
# plugin-specific options
# The following template receives a list of models.Alert objects
# The output goes in the http request body
format: |
{{ range . -}}
{{ $alert := . -}}
{
"extras": {
"client::display": {
"contentType": "text/markdown"
}
},
"priority": 3,
{{range .Decisions -}}
"title": "{{.Type }} {{ .Value }} for {{.Duration}}",
"message": "{{.Scenario}} \n\n[crowdsec cti](https://app.crowdsec.net/cti/{{.Value -}}) \n\n[shodan](https://shodan.io/host/{{.Value -}})"
{{end -}}
}
{{ end -}}
# The plugin will make requests to this url, eg: https://www.example.com/
url: http://IMPORT_GOTFIY_URL_HERE/message
# Any of the http verbs: "POST", "GET", "PUT"...
method: POST
headers:
X-Gotify-Key: IMPORT_CROWDSEC_TOKEN_HERE
Content-Type: application/json
# skip_tls_verification: # true or false. Default is false
CrowdSec
IMPORT_CROWDSEC_TOKEN_HERE
replace with the token that you copied
IMPORT_GOTFIY_URL_HERE
replace with the Gotify URL that you visit to access the GUI
docker compose restart
The following command will send a test notification from crowdsec to gotify
docker exec traefik_crowdsec cscli notifications test http_default
cd /home/myusername/docker
mkdir nginxpm && cd "$_"
mkdir data letsencrypt
version: "3.8"

services:
  # Nginx Proxy Manager — admin UI on 81, proxied traffic on 80/443.
  app:
    image: 'jc21/nginx-proxy-manager:latest'
    container_name: nginxpm
    restart: unless-stopped
    volumes:
      - ./data:/data
      - ./letsencrypt:/etc/letsencrypt
    ports:
      - "80:80"    # HTTP
      - "81:81"    # admin web UI
      - "443:443"  # HTTPS
docker compose up -d
Default login
Email:[email protected]
Password:changeme
docker network create -d bridge nginx-pmnet
docker network connect nginx-pmnet nginxpm_app_1
docker network connect nginx-pmnet nginxpm_db_1
portainer:
Click on recycle bin to remove ports
Advanced container settings --> Network: remove all the ports
network: nginx-pmnet
Deploy the container
Nginx Proxy Manager:
Add proxy host
Domain names: myapp.DOMAIN.COM
Scheme: http
Forward Hostname / IP: myapp
Forward Port: 80
Cache Assets.
Block Common Exploits.
Websockets Support.
SAVE
cd /home/myusername/docker
mkdir ampache && cd "$_"
version: "3.8"

services:
  # Ampache music streaming server.
  ampache:
    image: ampache/ampache
    container_name: ampache
    restart: unless-stopped
    volumes:
      - ./media:/media  # music library mounted at /media inside the container
    ports:
      - "8051:80"   # HTTP
      - "8543:443"  # HTTPS
docker compose up -d
Enable CREATE DATABASE USER for first time setup web
Ampache Database Username: ampache
Ampache Database User Password: ampache!
Click INSERT DATABASE
Insert Database Password that you just created
For Allow Transcoding:
Template Configuration: ffmpeg
For Players Enable the following:
- Web Interface,
- Ampache API,
- Subsonic,
- UPnP,
- DAAP(iTunes),
- WebDAV.
For Create Admin Account:
Choose a different username than the database username
Username: admin
Password: ampache!
Add a catalog in the Ampache-webapp
Catalog Name: User music
Catalog Type: Local
Path: /media
cd /home/myusername/docker
mkdir navidrome && cd "$_"
version: "3.8"

services:
  # Navidrome music server.
  navidrome:
    image: deluan/navidrome:latest
    container_name: navidrome
    restart: unless-stopped
    environment:
      ND_SCANINTERVAL: 30m
      ND_LOGLEVEL: info
      ND_BASEURL: ""  # explicit empty string — a bare value would parse as YAML null
    volumes:
      - /path/to/your/music/files:/music:ro  # music library (read-only)
      - ./data:/data
    ports:
      - "4533:4533"
docker compose up -d
cd /home/myusername/docker
mkdir snapdrop && cd "$_"
version: "3.8"

services:
  # Snapdrop local file sharing.
  snapdrop:
    image: lscr.io/linuxserver/snapdrop:latest
    container_name: snapdrop
    restart: unless-stopped
    environment:
      PUID: "1000"  # quoted so the value stays a string for the linuxserver init
      PGID: "1000"
      TZ: Etc/UTC
    volumes:
      - ./config:/config
    ports:
      - "80:80"
      - "443:443"
docker compose up -d
cd /home/myusername/docker
mkdir homeassistant && cd "$_"
version: "3.8"

services:
  # Home Assistant core.
  homeassistant:
    image: ghcr.io/home-assistant/home-assistant:stable
    container_name: homeassistant
    restart: unless-stopped
    volumes:
      - ./config:/config
      - /etc/localtime:/etc/localtime:ro  # keep container clock in sync with host
    ports:
      - "8260:8123"
    # depends_on:
    #   - db  # (was "mariadb": must match the service name below)
  # Optional MariaDB recorder backend — uncomment together with depends_on above.
  # db:
  #   image: linuxserver/mariadb
  #   container_name: homeassistant_db
  #   restart: unless-stopped
  #   environment:
  #     MYSQL_DATABASE: homeassistant
  #     MYSQL_USER: homeassistant
  #     MYSQL_PASSWORD: homeassistant!
  #     MYSQL_ROOT_PASSWORD: homeassistant!!
  #   volumes:
  #     - ./db:/var/lib/mysql
  #   ports:
  #     - "8261:3306"
docker compose up -d
https://github.com/basnijholt/lovelace-ios-themes
sudo apt install jq
sudo apt install network-manager
sudo curl -sL https://github.com/Kanga-Who/home-assistant/blob/master/supervised-installer.sh | bash -s
cd /home/myusername/docker
mkdir jellyfin && cd "$_"
version: "3.8"

services:
  # Jellyfin media server.
  jellyfin:
    image: jellyfin/jellyfin:latest
    container_name: jellyfin
    restart: unless-stopped
    environment:
      PGID: "1000"
      PUID: "1000"
      UMASK: "002"  # quoted — unquoted 002 would be read as an octal integer
      TZ: Etc/UTC
    devices:
      - /dev/dri:/dev/dri  # GPU device for hardware transcoding
    volumes:
      - /media/disk/DISKNAME/SERIESPATH:/media/TV
      - /media/disk/DISKNAME/MOVIEPATH:/media/movies
      - /media/disk/DISKNAME/MUSICPATH:/media/music
      - /media/disk/DISKNAME/PICTURESPATH:/media/pictures
      - /media/disk/DISKNAME/BOOKSPATH:/media/books
      - /dev/shm:/transcode  # RAM-backed transcode scratch space
      - ./config:/config
      - ./cache:/cache
    ports:
      - "8110:8096"
    networks:
      - jellyfin-net

networks:
  jellyfin-net:
    driver: bridge
docker compose up -d
Account --> Dashboard --> Plugins --> Repositories
Repositories name: jellyfinPluginMan
Repository URL: https://raw.githubusercontent.com/danieladov/JellyfinPluginManifest/master/manifest.json
Repositories name: Robiro
Repository URL: https://repo.codyrobibero.dev/manifest.json
cd /home/myusername/docker
mkdir jellyseerr && cd "$_"
version: "3.8"

services:
  # Jellyseerr request management for Jellyfin.
  jellyseerr:
    image: fallenbagel/jellyseerr:latest
    container_name: jellyseerr
    restart: unless-stopped
    environment:
      TZ: Etc/UTC
      LOG_LEVEL: debug
    volumes:
      - ./config:/app/config
    ports:
      - "8111:5055"
docker compose up -d
cd /home/myusername/docker
mkdir tixati && cd "$_"
version: "3.8"

services:
  # Tixati torrent client, accessed over VNC.
  tixati:
    image: kyzimaspb/tixati:latest
    container_name: tixati
    restart: unless-stopped
    environment:
      XVFB_RESOLUTION: 1000x900x24 # optional
      VNC_SERVER_PASSWORD: tixati! # optional — change this
    volumes:
      - ./downloads:/home/user/Desktop/downloads
      - ./torrent-files:/home/user/Desktop/torrent-files
      - ./config:/home/user/.config
    ports:
      - "8117:5900"  # VNC
docker compose up -d
cd /home/myusername/docker
mkdir unbound && cd "$_"
version: "3.8"

services:
  # Unbound recursive DNS resolver.
  unbound:
    image: mvance/unbound:latest
    container_name: unbound
    restart: unless-stopped
    healthcheck:
      disable: true
    volumes:
      - ./data:/opt/unbound/etc/unbound
    ports:
      - "8120:53/tcp"  # DNS
      - "8120:53/udp"
cd /home/myusername/docker
mkdir pihole && cd "$_"
This method will also automatically install the pi-hole container with the needed LAN network
Replace <your_networkcard_name> with the name of the network card you want to use.
In the macvlanconfig network configuration, set the subnet, gateway, and ip_range values to match the desired configuration for your MacVLAN network.
In the LAN network configuration, set the subnet, gateway, and ip_range values to match the desired configuration for your LAN network.
version: "3.8"

networks:
  # Template network holding the macvlan configuration.
  macvlanconfig:
    driver: macvlan  # (added: required so this network actually uses the macvlan driver)
    attachable: false
    internal: false
    driver_opts:
      parent: <your_networkcard_name>  # host NIC to attach to, e.g. eth0
    ipam:
      driver: default
      config:
        - subnet: 192.168.1.0/24
          gateway: 192.168.1.1
          ip_range: 192.168.1.100/24
  # Attachable LAN network that the pihole container joins.
  LAN:
    driver: macvlan
    attachable: true
    internal: false
    driver_opts:
      parent: <your_networkcard_name>
    ipam:
      driver: default
      config:
        - subnet: 192.168.1.0/24
          gateway: 192.168.1.1
          ip_range: 192.168.1.100/24

services:
  pihole:
    image: pihole/pihole:latest
    container_name: pihole
    hostname: pihole01
    restart: unless-stopped
    # Recommended but not required (DHCP needs NET_ADMIN)
    # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
    cap_add:
      - NET_ADMIN
    environment:
      TZ: Etc/UTC
      WEBPASSWORD: pihole!  # web UI password — change this
    volumes:
      - ./data:/etc/pihole
      - ./dnsmasq:/etc/dnsmasq.d
      # - ./lighttpd:/etc/lighttpd
      - ./errorpage:/var/www/html/pihole
    ports:
      - "8124:53/tcp"  # DNS
      - "8124:53/udp"
      - "8123:67/udp"  # DHCP
      - "8122:80/tcp"
      - "8121:443/tcp"
    networks:
      LAN:
        ipv4_address: 192.168.1.123  # static IP on the LAN macvlan network
This method will only install the pi-hole container. You will need to create LAN network manually with portainer
version: "3.8"

services:
  # Pi-hole DNS ad blocker (LAN network attached manually via Portainer).
  pihole:
    image: pihole/pihole:latest
    container_name: pihole
    restart: unless-stopped
    # Recommended but not required (DHCP needs NET_ADMIN)
    # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
    cap_add:
      - NET_ADMIN
    environment:
      TZ: Etc/UTC
      WEBPASSWORD: pihole!  # web UI password — change this
    volumes:
      - ./data:/etc/pihole
      - ./dnsmasq:/etc/dnsmasq.d
      # - ./lighttpd:/etc/lighttpd
      - ./errorpage:/var/www/html/pihole
    ports:
      - "8124:53/tcp"  # DNS
      - "8124:53/udp"
      - "8123:67/udp"  # DHCP
      - "8122:80/tcp"
      - "8121:443/tcp"
docker compose up -d
Go to your portainer.
Go to Networks and select + Add network
Name it: macvlanconfig
For Driver select: macvlan
For Parent network card insert your networkcard name
You can find your network name by doing ifconfig in your SSH console
Subnet: 192.168.1.0/24
Gateway: 192.168.1.1
IP range: 192.168.1.0/24
Click Create the network
Select again + Add network
Name it: LAN
For Driver select: macvlan
Select Creation I want to create a network from a configuration
For Configuration select: macvlanconfig
Select Enable manual container attachment Make sure its on.
Click Create the network
Go to Containers and select pihole
Click on Duplicate/Edit
Click on Network
For Network select: LAN
Optional: You can set the hostname to pihole01 for example to recognize the hostname better
Source: How To Setup MacVLAN in Portainer: https://www.youtube.com/watch?v=o7nn6Tv-PAw
Visit: http://IMPORT_PIHOLE_URL_HERE/admin/login.php
Default login
Password:pihole!
Tracking & Telemetry Lists - https://firebog.net/ GREEN
https://v.firebog.net/hosts/Easyprivacy.txt
https://v.firebog.net/hosts/Prigent-Ads.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts
https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt
Suspicious Lists - https://firebog.net/ GREEN
https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts
https://v.firebog.net/hosts/static/w3kbl.txt
Other Lists - https://firebog.net/ GREEN
https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser
Migrated from /etc/pihole/adlists.list
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
Malicious Lists - https://firebog.net/ GREEN
https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt
https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt
https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt
https://v.firebog.net/hosts/Prigent-Crypto.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts
https://bitbucket.org/ethanr/dns-blacklists/raw/8575c9f96e5b4a1308f2f12394abd86d0927a4a0/bad_lists/Mandiant_APT1_Report_Appendix_D.txt
https://phishing.army/download/phishing_army_blocklist_extended.txt
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
https://v.firebog.net/hosts/RPiList-Malware.txt
https://v.firebog.net/hosts/RPiList-Phishing.txt
https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt
https://raw.githubusercontent.com/AssoEchap/stalkerware-indicators/master/generated/hosts
https://urlhaus.abuse.ch/downloads/hostfile/
Advertising Lists - https://firebog.net/ GREEN
https://adaway.org/hosts.txt
https://v.firebog.net/hosts/AdguardDNS.txt
https://v.firebog.net/hosts/Admiral.txt
https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
https://v.firebog.net/hosts/Easylist.txt
https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/UncheckyAds/hosts
https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts
More information: https://www.youtube.com/watch?v=0wpn3rXTe0g
http://192.168.x.x/admin/index.php
mydockerhost.local.DOMAIN.COM
YOUR_DOCKER_HOST_IP_HERE
myservice.local.DOMAIN.COM
mydockerhost.local.DOMAIN.COM
nano pihole/dnsmasq/05-pihole-custom-cname.conf
cname=myservice.local.DOMAIN.COM,mydockerhost.local.DOMAIN.COM
cname=myservice02.local.DOMAIN.COM,mydockerhost.local.DOMAIN.COM
cname=myservice03.local.DOMAIN.COM,mydockerhost.local.DOMAIN.COM
cname=myservice04.local.DOMAIN.COM,mydockerhost.local.DOMAIN.COM
cname=myservice05.local.DOMAIN.COM,mydockerhost.local.DOMAIN.COM
cd /home/myusername/docker/pihole/lighttpd
nano external.conf
server.error-handler-404 = "/pihole/pi-error.php"
cd /home/myusername/docker/pihole/errorpage
Download and Import the following file into the errorpage folder
pi-error.php
docker restart pihole
docker exec -it pihole /bin/bash
sudo nano /etc/lighttpd/lighttpd.conf
sudo chmod +x /var/www/html/pihole/pi-error.php
sudo service lighttpd restart
sudo service pihole-FTL restart
cd /home/myusername/docker
mkdir netbootxyz && cd "$_"
version: "3.8"

services:
  # netboot.xyz PXE boot server.
  netbootxyz:
    image: lscr.io/linuxserver/netbootxyz:latest
    container_name: netbootxyz
    restart: unless-stopped
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: Etc/UTC
      # MENU_VERSION: 1.9.9 # optional — unset is latest version
      PORT_RANGE: "30000:30010"  # optional; quoted so YAML keeps it a string, not a sexagesimal int
      SUBFOLDER: / # optional
    volumes:
      - ./config:/config
      - ./assets:/assets
    ports:
      - "9170:3000"   # web UI
      - "9171:69/udp" # TFTP
      - "9172:80" # optional
  # Samba share serving the Windows install assets.
  samba:
    image: servercontainers/samba:latest
    # image: ghcr.io/servercontainers/samba
    container_name: netbootxyz-samba
    restart: unless-stopped
    environment:
      MODEL: 'TimeCapsule'
      AVAHI_NAME: netbootsamba
      SAMBA_CONF_LOG_LEVEL: "3"
      # uncomment to disable optional services
      # WSDD2_DISABLE: 1
      # AVAHI_DISABLE: 1
      GROUP_family: "1500"
      ACCOUNT_netboot: netboot123  # share credentials — change in production
      UID_netboot: "1000"
      GROUPS_netboot: family
      # NOTE(review): the ", guest ok" separators below (comma instead of ";") are
      # kept verbatim from the original — verify against the image's share syntax.
      SAMBA_VOLUME_CONFIG_netbootonly: "[netboot]; path=/shares/netboot; valid users = netboot; guest ok = no; read only = no; browseable = yes"
      SAMBA_VOLUME_CONFIG_shared_home: "[Home]; path=/shares/homes/%U; valid users = netboot, guest ok = no; read only = no; browseable = yes"
      SAMBA_VOLUME_CONFIG_public: "[Public]; path=/shares/public; valid users = netboot, guest ok = no; read only = no; browseable = yes; force group = family"
      SAMBA_VOLUME_CONFIG_public_ro: "[Public ReadOnly]; path=/shares/public; guest ok = yes; read only = yes; browseable = yes; force group = family"
      SAMBA_VOLUME_CONFIG_timemachine: "[TimeMachine]; path=/shares/timemachine/%U; valid users = netboot, guest ok = no; read only = no; browseable = yes; fruit:time machine = yes; fruit:time machine max size = 500G"
    volumes:
      - /etc/avahi/services/:/external/avahi
      - ./samba/shares/netboot:/shares/netboot
      - ./samba/shares/public:/shares/public
      - ./samba/shares/homes:/shares/homes
      - ./samba/shares/timemachine:/shares/timemachine
Make sure the Pi-Hole DHCP server is enabled!!!
Settings -> DHCP
cd /home/myusername/docker/pihole/dnsmasq
nano 10-TFTP.conf
dhcp-match=set:bios,60,PXEClient:Arch:00000
dhcp-boot=tag:bios,netboot.xyz.kpxe,,YOUR_NETBOOTXYZ_IP
dhcp-match=set:efi32,60,PXEClient:Arch:00002
dhcp-boot=tag:efi32,netboot.xyz.efi,,YOUR_NETBOOTXYZ_IP
dhcp-match=set:efi32-1,60,PXEClient:Arch:00006
dhcp-boot=tag:efi32-1,netboot.xyz.efi,,YOUR_NETBOOTXYZ_IP
dhcp-match=set:efi64,60,PXEClient:Arch:00007
dhcp-boot=tag:efi64,netboot.xyz.efi,,YOUR_NETBOOTXYZ_IP
dhcp-match=set:efi64-1,60,PXEClient:Arch:00008
dhcp-boot=tag:efi64-1,netboot.xyz.efi,,YOUR_NETBOOTXYZ_IP
dhcp-match=set:efi64-2,60,PXEClient:Arch:00009
dhcp-boot=tag:efi64-2,netboot.xyz.efi,,YOUR_NETBOOTXYZ_IP
https://github.com/deffcolony/ai-toolbox/blob/main/etc/windows/adk-launcher.bat
https://github.com/deffcolony/ai-toolbox/blob/main/etc/windows/adk/startnet.cmd
https://github.com/deffcolony/ai-toolbox/blob/main/etc/windows/adk/unattend.xml
Edit startnet.cmd so it matches your setup
Run adk-launcher.bat
cd /home/myusername/docker/netbootxyz/assets
mkdir -p ./winpe/x64 &&
cd /home/myusername/docker/netbootxyz/samba/shares/netboot/
mkdir -p ./assets/windows/{10,11}
Extract WinPE_amd64.iso then upload all files into x64 folder
Extract Windows 11 or 10 iso then upload all files into /assets/windows/11 or 10 folder
Visit http://YOUR_NETBOOTXYZ_IP:3000
Go to Menus > boot.cfg
Add or replace the following line set win_base_url http://YOUR_NETBOOTXYZ_SAMBA_IP:80/WinPE
Do a networkboot and go to Distributions: > Windows > Load Microsoft Windows Installer....
If the Base URL is empty, then set it manually:
http://YOUR_NETBOOTXYZ_SAMBA_IP/winpe
cd /home/myusername/docker
mkdir -p privatebin/config && cd privatebin/config
Download the conf.php below and drop it into config folder
conf.php
cd ..
version: "3.8"

services:
  # PrivateBin encrypted pastebin.
  privatebin:
    image: privatebin/nginx-fpm-alpine:latest
    container_name: privatebin
    restart: unless-stopped
    read_only: true  # hardened: container filesystem is read-only
    volumes:
      - ./data:/srv/data
      - ./config/conf.php:/srv/cfg/conf.php:ro
    ports:
      - "9215:8080"
docker compose up -d
cd /home/myusername/docker
mkdir hastebin && cd "$_"
version: "3.8"

services:
  # Hastebin pastebin server.
  hastebin:
    image: arminfriedl/hastebin:latest
    container_name: hastebin
    restart: unless-stopped
    volumes:
      - ./data:/app/data
    ports:
      - "8132:7777"
docker compose up -d
cd /home/myusername/docker
mkdir nextcloud && cd "$_"
services:
  # PostgreSQL database backing Nextcloud.
  db:
    image: postgres:16
    container_name: nextcloud_postgres
    restart: unless-stopped
    user: ${UID}:${GID}
    volumes:
      - ./db:/var/lib/postgresql/data
      # - /etc/passwd:/etc/passwd:ro
    env_file:
      - .env
    healthcheck:
      test: ["CMD-SHELL", "pg_isready --dbname='${POSTGRES_DB}' --username='${POSTGRES_USER}' || exit 1"]
      interval: 5m
      start_period: 30s
      timeout: 5s
      retries: 3
    networks:
      - nextcloud-net

  # Main Nextcloud web application.
  app:
    image: nextcloud:latest
    container_name: nextcloud
    restart: unless-stopped
    user: ${UID}:${GID}
    env_file:
      - .env
    volumes:
      - ./nextcloud:/var/www/html
      - ./apps:/var/www/html/custom_apps
      - ./data:/var/www/html/data
      - ./config:/var/www/html/config
      - ./redis-session.ini:/usr/local/etc/php/conf.d/redis-session.ini
      - ./remoteip.conf:/etc/apache2/conf-available/remoteip.conf:ro
    ports:
      - "8330:80"
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - nextcloud-net

  # Client Push (notify_push) companion service.
  notify_push:
    image: nextcloud:latest
    container_name: nextcloud_push
    restart: unless-stopped
    user: 1004:1004  # NOTE(review): hard-coded uid:gid while siblings use ${UID}:${GID} — confirm intended
    environment:
      TZ: Etc/UTC
      PORT: "7867"
      NEXTCLOUD_URL: http://app # don't go through the proxy to contact the nextcloud server
    entrypoint: /var/www/html/custom_apps/notify_push/bin/x86_64/notify_push /var/www/html/config/config.php
    volumes:
      - ./apps:/var/www/html/custom_apps
      - ./config:/var/www/html/config
    ports:
      - "8331:7867"
    depends_on:
      - app
    networks:
      - nextcloud-net

  # Background-jobs runner (executes cron.sh).
  cron:
    image: nextcloud:latest
    container_name: nextcloud_cron
    restart: unless-stopped
    # special UID handling https://github.com/nextcloud/docker/issues/1740
    environment:
      TZ: ${TIMEZONE}
      UID: ${UID}
    env_file:
      - .env
    volumes:
      - ./nextcloud:/var/www/html
      - ./apps:/var/www/html/custom_apps
      - ./data:/var/www/html/data
      - ./config:/var/www/html/config
      - ./cron.sh:/cron.sh
    entrypoint: /cron.sh
    depends_on:
      - app
    networks:
      - nextcloud-net

  # Redis cache / session store; persistence disabled via `--save ""`.
  redis:
    image: redis:bookworm
    container_name: nextcloud_redis
    restart: unless-stopped
    user: ${UID}:${GID}
    command:
      - --save ""
    # volumes:
    #   - ./redis:/data
    environment:
      TZ: ${TIMEZONE}
    healthcheck:
      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
      start_period: 10s
      interval: 30s
      retries: 3
      timeout: 3s
    networks:
      - nextcloud-net

  # Imaginary image-preview generator, reachable only inside the compose network.
  imaginary:
    image: nextcloud/aio-imaginary:latest
    container_name: nextcloud_imaginary
    restart: unless-stopped
    user: ${UID}:${GID}
    expose:
      - "9000"
    environment:
      TZ: ${TIMEZONE}
    cap_add:
      - SYS_NICE
    tmpfs:
      - /tmp
    depends_on:
      - app
    networks:
      - nextcloud-net

  # Collabora Online office server.
  nextcloud-collabora:
    image: collabora/code
    container_name: nextcloud_collabora
    restart: unless-stopped
    ports:
      - "8332:9980"
    # expose:
    #   - "9980"
    environment:
      # should work as "domain=cloud1\.nextcloud\.com|cloud2\.nextcloud\.com"
      - domain=${COLLABORA_DOMAINS}
      - 'dictionaries=en_US,nl_NL'
      - VIRTUAL_PROTO=http
      - VIRTUAL_PORT=9980
      - VIRTUAL_HOST=${COLLABORA_FQDN}
      - "extra_params=--o:ssl.enable=false --o:ssl.termination=true"
    env_file:
      - .env
    cap_add:
      - MKNOD
    tty: true
    networks:
      - nextcloud-net

networks:
  nextcloud-net:
    external: true
Download nextcloud_launcher.sh
then drop it into the nextcloud folder
sudo chmod +x nextcloud_launcher.sh && sudo ./nextcloud_launcher.sh
cd /home/myusername/docker/traefik-crowdsec/traefik-data
nano fileConfig.yml
http:
  routers:
    ##########################################################
    ###======================ROUTERS======================###
    ### nextcloud - router ###
    nextcloud:
      entryPoints:
        - https
      rule: "Host(`nextcloud.DOMAIN.COM`) || Host(`nextcloud.local.DOMAIN.COM`)"
      service: nextcloud
      priority: 1
    # nextcloud push - router
    nextcloud-push:
      rule: "Host(`nextcloud.DOMAIN.COM`) && PathPrefix(`/push`)"
      service: nextcloud-push
      priority: 2  # higher than the main router so /push traffic wins
    # collabora - router
    collabora:
      rule: "Host(`collabora.DOMAIN.COM`) || Host(`collabora.local.DOMAIN.COM`)"
      service: collabora

  ##########################################################
  ###======================SERVICES======================###
  services:
    ### nextcloud - service ###
    nextcloud:
      loadBalancer:
        servers:
          - url: http://192.168.1.x:8330
    # nextcloud push - service
    nextcloud-push:
      loadBalancer:
        servers:
          - url: http://192.168.1.x:8331
    # collabora - service
    collabora:
      loadBalancer:
        servers:
          - url: http://192.168.1.x:8332
cd /home/myusername/docker
mkdir whoogle && cd "$_"
version: "3.8"

services:
  # Whoogle privacy-respecting search frontend.
  whoogle:
    image: benbusby/whoogle-search:latest
    container_name: whoogle
    restart: unless-stopped
    user: root
    volumes:
      - ./config:/config
    ports:
      - "8140:5000"
docker compose up -d
cd /home/myusername/docker
mkdir duckdns && cd "$_"
mkdir config
version: "3.8"

services:
  # DuckDNS dynamic-DNS updater.
  duckdns:
    image: ghcr.io/linuxserver/duckdns
    container_name: duckdns
    restart: unless-stopped
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: Etc/UTC
      # subdomain example: nicecloud.duckdns.org
      SUBDOMAINS: subdomain1,subdomain2,subdomain3
      TOKEN: token  # your DuckDNS account token
      LOG_FILE: "false"  # quoted so the container receives the literal string
    volumes:
      - ./config:/config
docker compose up -d
cd /home/myusername/docker
mkdir workadventure
cd workadventure
touch .env
touch docker-compose.yml
# Server Information
SERVER_NAME=
SERVER_MOTD=
SERVER_ICON=
DEBUG_MODE=false
JITSI_URL=meet.jit.si
# If your Jitsi environment has authentication set up, you MUST set JITSI_PRIVATE_MODE to "true" and you MUST pass a SECRET_JITSI_KEY to generate the JWT secret
JITSI_PRIVATE_MODE=false
JITSI_ISS=
SECRET_JITSI_KEY=
# Jitsi settings for the low-level Jitsi API (used by the live-streaming area)
# JITSI_DOMAIN is the domain name of your Jitsi web instance (only the domain name, not the full URL)
JITSI_DOMAIN=
# JITSI_XMPP_DOMAIN is the domain name used by Prosody.
# You can find this value in the Jitsi config.js file ("hosts.domain" key)
# If you are using Jitsi Docker install, this is the value of the XMPP_DOMAIN environment variable.
JITSI_XMPP_DOMAIN=
# JITSI_XMPP_MUC_DOMAIN is the domain name used by Prosody for MUC.
# You can find this value in the Jitsi config.js file ("hosts.muc" key)
# If you are using Jitsi Docker install, this is the value of the XMPP_MUC_DOMAIN environment variable.
JITSI_MUC_DOMAIN=
# BigBlueButton settings.
# From your BBB instance, you can get the correct values using the command: "bbb-conf --secret"
# This defaults to a test instance kindly provided by blindsidenetworks.com. Please change this in production settings.
BBB_URL=https://test-install.blindsidenetworks.com/bigbluebutton/
BBB_SECRET=8cd8ef52e8e101574e400365b55e11a6
ADMIN_API_URL=
ADMIN_API_TOKEN=123
START_ROOM_URL=/_/global/maps.workadventure.localhost/starter/map.json
MAP_STORAGE_URL=map-storage:50053
# If your Turn server is configured to use the Turn REST API, you should put the shared auth secret here.
# If you are using Coturn, this is the value of the "static-auth-secret" parameter in your coturn config file.
# Keep empty if you are sharing hard coded / clear text credentials.
TURN_STATIC_AUTH_SECRET=
TURN_SERVER=
# You can uncomment the 2 lines below and the Coturn section in docker-compose.yaml to test this behaviour locally
#TURN_SERVER=turn:coturn.workadventure.localhost:3478,turns:coturn.workadventure.localhost:5349
#TURN_STATIC_AUTH_SECRET=SomeStaticAuthSecret
DISABLE_NOTIFICATIONS=true
SKIP_RENDER_OPTIMIZATIONS=false
# The email address used by Let's encrypt to send renewal warnings (compulsory)
ACME_EMAIL=
MAX_PER_GROUP=4
MAX_USERNAME_LENGTH=10
# Configure low and recommended bandwidth used by video and screen share in the peer-to-peer connection (in kbit/s)
PEER_VIDEO_LOW_BANDWIDTH=150
PEER_VIDEO_RECOMMENDED_BANDWIDTH=150
PEER_SCREEN_SHARE_LOW_BANDWIDTH=250
PEER_SCREEN_SHARE_RECOMMENDED_BANDWIDTH=1000
OPID_CLIENT_ID=
OPID_CLIENT_SECRET=
OPID_CLIENT_ISSUER=
OPID_PROFILE_SCREEN_PROVIDER=
OPID_PROMPT=login
OPID_LOCALE_CLAIM=
OPID_LOGOUT_REDIRECT_URL=
DISABLE_ANONYMOUS=
OPID_SCOPE=
OPID_USERNAME_CLAIM=
OPID_TAGS_CLAIM=
# Whether the user can choose its name or if the name is dictated by OpenID.
# Can be one of "user_input", "allow_override_opid", "force_opid"
# This setting is only useful if DISABLE_ANONYMOUS=true
# user_input: the user will be prompted for his/her Woka name
# force_opid: the user cannot decide his/her Woka name
# allow_override_opid: by default, the user name from OpenID is used, but the user can change it
OPID_WOKA_NAME_POLICY=
# If you want to have a contact page in your menu, you MUST set CONTACT_URL to the URL of the page that you want
CONTACT_URL=
# Prometheus settings
## Uncomment this to enable the /metrics Prometheus endpoint.
## To hit this endpoint, you will need to configure Prometheus with:
## authorization:
## type: Bearer
## credentials: "[The value of PROMETHEUS_AUTHORIZATION_TOKEN env variable]"
PROMETHEUS_AUTHORIZATION_TOKEN=
# The maximum time to live of player variables for logged players, expressed in seconds (no limit by default).
# Use "-1" for infinity.
# Note that anonymous players don't have any TTL limit because their data is stored in local storage, not in Redis database.
PLAYER_VARIABLES_MAX_TTL=-1
# MAP EDITOR SETTINGS
ENABLE_MAP_EDITOR=true
# If you want to allow only some users to access the map editor, you can set the list of authorized users here, email separated by commas. (Only possible if OpenID Connect is configured)
# Leave blank if you want to allow all users to access the map editor.
# This variable is ignored if an AdminAPI is configured
MAP_EDITOR_ALLOWED_USERS=
# AWS environment variables for the uploader
# AWS_ACCESS_KEY_ID=minio-access-key
# AWS_SECRET_ACCESS_KEY=minio-secret-access-key
# AWS_DEFAULT_REGION=eu-west-1
# AWS_BUCKET=workadventure-bucket
# AWS_ENDPOINT=http://cdn.workadventure.localhost/
#
# Time for which signed urls are valid (in seconds)
# UPLOADER_AWS_SIGNED_URL_EXPIRATION=60
# Redis for uploader service of WorkAdventure
## The uploader service stores all files uploaded by the chat service
## 2 possibilities to setup the uploader storage: AWS, REDIS
### AWS with all environement variable AWS
### Redis with this environment variable
UPLOADER_REDIS_HOST=redis
UPLOADER_REDIS_PORT=6379
UPLOADER_REDIS_DB_NUMBER=1
###############################
# Chat environment variables  #
###############################
EJABBERD_JWT_SECRET=mySecretJwt
EJABBERD_DOMAIN=ejabberd
EJABBERD_USER=admin
EJABBERD_PASSWORD=admin
# Max day of chat history that can be fetched by users
## No restriction is : 0 or not defined value
# MAX_HISTORY_CHAT=0
# Embedely key api for rich media embeds
## used in the chat service and the map editor
EMBEDLY_KEY=
# Iframely key api for rich media embeds
## used in the chat service and the map editor
IFRAMELY_KEY=
# Enable / disable chat
ENABLE_CHAT=true
# Enable / disable upload of file in chat (MUST BE TRUE ONLY IF ENABLE_CHAT IS TRUE)
ENABLE_CHAT_UPLOAD=true
ENABLE_CHAT_ONLINE_LIST=true
ENABLE_CHAT_DISCONNECTED_LIST=true
# Chat max uploadable file size (Byte)
UPLOAD_MAX_FILESIZE=10485760
# JWT secret key
SECRET_KEY=yourSecretKey2020
# Report issues menu
ENABLE_REPORT_ISSUES_MENU=false
REPORT_ISSUES_URL=
# LogRocket
LOGROCKET_ID=
# Sentry integration
## Find the DSN in the Sentry UI
SENTRY_DSN_FRONT=
SENTRY_DSN_PUSHER=
SENTRY_DSN_MAPSTORAGE=
SENTRY_DSN_BACK=
SENTRY_DSN_CHAT=
## Find the the release name in the Sentry UI
SENTRY_RELEASE=local
SENTRY_ENVIRONMENT=local
SENTRY_ORG=
SENTRY_PROJECT=
# RoomAPI
ROOM_API_SECRET_KEY=
# Integration tools
KLAXOON_ENABLED=false
KLAXOON_CLIENT_ID=
YOUTUBE_ENABLED=true
GOOGLE_DRIVE_ENABLED=true
GOOGLE_DOCS_ENABLED=true
GOOGLE_SHEETS_ENABLED=true
GOOGLE_SLIDES_ENABLED=true
# (removed duplicate GOOGLE_DRIVE_ENABLED — already set above)
ERASER_ENABLED=true
EXCALIDRAW_ENABLED=true
EXCALIDRAW_DOMAINS=
# If you want to force allow some domains to be embedded in WorkAdventure, you can set the list of authorized domains here, separated by ','.
# Example: EMBEDDED_DOMAINS_WHITELIST=klaxoon.com,google.com
EMBEDDED_DOMAINS_WHITELIST=
# Google drive picker
GOOGLE_DRIVE_PICKER_CLIENT_ID=
GOOGLE_DRIVE_PICKER_APP_ID=
services:
reverse-proxy:
image: traefik:v2.8
restart: ${RESTART_POLICY}
command:
- --log.level=${LOG_LEVEL}
- --providers.docker
- --providers.docker.exposedbydefault=false
# Entry points
- --entryPoints.web.address=:80
- --entrypoints.web.http.redirections.entryPoint.to=websecure
- --entrypoints.web.http.redirections.entryPoint.scheme=https
- --entryPoints.websecure.address=:443
- --entryPoints.grpc.address=:50051
# HTTP challenge
- --certificatesresolvers.myresolver.acme.email=${ACME_EMAIL}
- --certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json
- --certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web
# Let's Encrypt's staging server
# uncomment during testing to avoid rate limiting
#- --certificatesresolvers.dnsresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory
ports:
- "${HTTP_PORT}:80"
- "${HTTPS_PORT}:443"
- "${GRPC_PORT}:50051"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ${DATA_DIR}/letsencrypt/:/letsencrypt/
play:
  image: thecodingmachine/workadventure-play:${VERSION}
  restart: ${RESTART_POLICY}
  environment:
    - DEBUG_MODE
    - JITSI_URL
    - JITSI_PRIVATE_MODE
    - ENABLE_MAP_EDITOR
    - MAP_EDITOR_ALLOWED_USERS
    - PUSHER_URL=https://${DOMAIN}/
    - ICON_URL=/icon
    - TURN_SERVER
    - TURN_USER
    - TURN_PASSWORD
    - TURN_STATIC_AUTH_SECRET
    - STUN_SERVER
    - SKIP_RENDER_OPTIMIZATIONS
    - MAX_PER_GROUP
    - MAX_USERNAME_LENGTH
    - DISABLE_ANONYMOUS
    - DISABLE_NOTIFICATIONS
    - SECRET_KEY
    - API_URL=back:50051
    - FRONT_URL=/
    - CHAT_URL=/chat/
    - INTERNAL_MAP_STORAGE_URL=http://map-storage:3000
    - PUBLIC_MAP_STORAGE_URL=https://${DOMAIN}/map-storage
    - START_ROOM_URL
    - OPID_PROMPT=login
    - OPID_WOKA_NAME_POLICY
    - OPID_CLIENT_ID
    - OPID_CLIENT_SECRET
    - OPID_CLIENT_ISSUER
    - OPID_PROFILE_SCREEN_PROVIDER
    - OPID_SCOPE
    - OPID_USERNAME_CLAIM
    - OPID_LOCALE_CLAIM
    - OPID_LOGOUT_REDIRECT_URL
    - ENABLE_CHAT
    - ENABLE_CHAT_UPLOAD
    - ENABLE_CHAT_ONLINE_LIST
    - ENABLE_CHAT_DISCONNECTED_LIST
    - UPLOADER_URL=/uploader
    # Only used if you set up a JWT authentication mechanism in Ejabberd
    - EJABBERD_JWT_SECRET=${EJABBERD_JWT_SECRET}
    - EJABBERD_DOMAIN=${EJABBERD_DOMAIN}
    # Report issues menu
    - ENABLE_REPORT_ISSUES_MENU=${ENABLE_REPORT_ISSUES_MENU}
    - REPORT_ISSUES_URL=${REPORT_ISSUES_URL}
    - ENABLE_OPENAPI_ENDPOINT=true
    - ADMIN_API_TOKEN
    - ADMIN_API_URL
    - ADMIN_URL
    - ROOM_API_PORT=50051
    - ROOM_API_SECRET_KEY=${ROOM_API_SECRET_KEY}
    # NOTE(review): verbose gRPC debug logging — consider removing in production
    - GRPC_VERBOSITY=DEBUG
    - GRPC_TRACE=all
    - SENTRY_ORG=${SENTRY_ORG}
    - SENTRY_PROJECT=${SENTRY_PROJECT}
    - SENTRY_DSN_FRONT=${SENTRY_DSN_FRONT}
    - SENTRY_DSN_PUSHER=${SENTRY_DSN_PUSHER}
    - SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}
    - SENTRY_RELEASE=${SENTRY_RELEASE}
    - SENTRY_TRACES_SAMPLE_RATE=${SENTRY_TRACES_SAMPLE_RATE}
    - JITSI_DOMAIN
    - JITSI_XMPP_DOMAIN
    - JITSI_MUC_DOMAIN
    - WOKA_SPEED
    - FEATURE_FLAG_BROADCAST_AREAS=${FEATURE_FLAG_BROADCAST_AREAS}
    # Tools integration
    - KLAXOON_ENABLED=${KLAXOON_ENABLED}
    - KLAXOON_CLIENT_ID=${KLAXOON_CLIENT_ID}
    - YOUTUBE_ENABLED=${YOUTUBE_ENABLED}
    - GOOGLE_DRIVE_ENABLED=${GOOGLE_DRIVE_ENABLED}
    - GOOGLE_DOCS_ENABLED=${GOOGLE_DOCS_ENABLED}
    - GOOGLE_SHEETS_ENABLED=${GOOGLE_SHEETS_ENABLED}
    - GOOGLE_SLIDES_ENABLED=${GOOGLE_SLIDES_ENABLED}
    - ERASER_ENABLED=${ERASER_ENABLED}
    - EXCALIDRAW_ENABLED=${EXCALIDRAW_ENABLED}
    - EXCALIDRAW_DOMAINS=${EXCALIDRAW_DOMAINS}
    - EMBEDDED_DOMAINS_WHITELIST=${EMBEDDED_DOMAINS_WHITELIST}
    - PEER_VIDEO_LOW_BANDWIDTH=${PEER_VIDEO_LOW_BANDWIDTH}
    - PEER_VIDEO_RECOMMENDED_BANDWIDTH=${PEER_VIDEO_RECOMMENDED_BANDWIDTH}
    - PEER_SCREEN_SHARE_LOW_BANDWIDTH=${PEER_SCREEN_SHARE_LOW_BANDWIDTH}
    - PEER_SCREEN_SHARE_RECOMMENDED_BANDWIDTH=${PEER_SCREEN_SHARE_RECOMMENDED_BANDWIDTH}
    # Google drive picker
    - GOOGLE_DRIVE_PICKER_CLIENT_ID=${GOOGLE_DRIVE_PICKER_CLIENT_ID}
    - GOOGLE_DRIVE_PICKER_APP_ID=${GOOGLE_DRIVE_PICKER_APP_ID}
  labels:
    traefik.enable: "true"
    traefik.http.routers.play.rule: "Host(`${DOMAIN}`) && PathPrefix(`/`)"
    traefik.http.routers.play.entryPoints: "web"
    traefik.http.services.play.loadbalancer.server.port: "3000"
    traefik.http.routers.play-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/`)"
    traefik.http.routers.play-ssl.entryPoints: "websecure"
    traefik.http.routers.play-ssl.tls: "true"
    traefik.http.routers.play-ssl.tls.certresolver: "myresolver"
    traefik.http.routers.play-ssl.service: "play"
    traefik.http.routers.room-api.rule: "Host(`${DOMAIN}`)"
    traefik.http.routers.room-api.entryPoints: "grpc"
    traefik.http.routers.room-api.service: "room-api"
    traefik.http.services.room-api.loadbalancer.server.port: "50051"
    # h2c: gRPC over cleartext HTTP/2 between Traefik and the container
    traefik.http.services.room-api.loadbalancer.server.scheme: "h2c"
    traefik.http.routers.room-api.tls: "true"
    traefik.http.routers.room-api.tls.certresolver: "myresolver"
chat:
  image: thecodingmachine/workadventure-chat:${VERSION}
  restart: ${RESTART_POLICY}
  environment:
    - PUSHER_URL=/
    - UPLOADER_URL=/uploader
    - EMBEDLY_KEY=${EMBEDLY_KEY}
    - ENABLE_CHAT_UPLOAD=${ENABLE_CHAT_UPLOAD}
    - EJABBERD_DOMAIN=${EJABBERD_DOMAIN}
    - EJABBERD_WS_URI=wss://${DOMAIN}/xmpp/ws
    - SENTRY_DSN=${SENTRY_DSN_CHAT}
    - SENTRY_ENVIRONMENT=${SENTRY_ENVIRONMENT}
    - SENTRY_ORG=${SENTRY_ORG}
    - SENTRY_PROJECT=${SENTRY_PROJECT}
    - SENTRY_AUTH_TOKEN=${SENTRY_AUTH_TOKEN}
    - SENTRY_RELEASE=${SENTRY_RELEASE}
    - SENTRY_TRACES_SAMPLE_RATE=${SENTRY_TRACES_SAMPLE_RATE}
  labels:
    traefik.enable: "true"
    # /chat prefix is stripped before the request reaches the container
    traefik.http.middlewares.strip-chat-prefix.stripprefix.prefixes: "/chat"
    traefik.http.routers.chat.rule: "Host(`${DOMAIN}`) && PathPrefix(`/chat`)"
    traefik.http.routers.chat.middlewares: "strip-chat-prefix@docker"
    traefik.http.routers.chat.entryPoints: "web"
    traefik.http.services.chat.loadbalancer.server.port: "80"
    traefik.http.routers.chat-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/chat`)"
    traefik.http.routers.chat-ssl.middlewares: "strip-chat-prefix@docker"
    traefik.http.routers.chat-ssl.entryPoints: "websecure"
    traefik.http.routers.chat-ssl.service: "chat"
    traefik.http.routers.chat-ssl.tls: "true"
    traefik.http.routers.chat-ssl.tls.certresolver: "myresolver"
back:
  image: thecodingmachine/workadventure-back:${VERSION}
  environment:
    - PLAY_URL=https://${DOMAIN}
    - SECRET_JITSI_KEY
    - ENABLE_MAP_EDITOR
    - SECRET_KEY
    - ADMIN_API_TOKEN
    - ADMIN_API_URL
    - TURN_SERVER
    - TURN_USER
    - TURN_PASSWORD
    - TURN_STATIC_AUTH_SECRET
    - STUN_SERVER
    - JITSI_URL
    - JITSI_ISS
    - BBB_URL
    - BBB_SECRET
    - MAX_PER_GROUP
    - STORE_VARIABLES_FOR_LOCAL_MAPS
    - REDIS_HOST=redis
    - PROMETHEUS_AUTHORIZATION_TOKEN
    - MAP_STORAGE_URL=map-storage:50053
    - INTERNAL_MAP_STORAGE_URL=http://map-storage:3000
    - PUBLIC_MAP_STORAGE_URL=https://${DOMAIN}/map-storage
    - PLAYER_VARIABLES_MAX_TTL
    - EJABBERD_API_URI
    - EJABBERD_DOMAIN=${EJABBERD_DOMAIN}
    - EJABBERD_USER=${EJABBERD_USER}
    - EJABBERD_PASSWORD=${EJABBERD_PASSWORD}
    - ENABLE_CHAT
    - ENABLE_CHAT_UPLOAD
    - SENTRY_DSN=${SENTRY_DSN_BACK}
    - SENTRY_RELEASE=${SENTRY_RELEASE}
    - SENTRY_TRACES_SAMPLE_RATE=${SENTRY_TRACES_SAMPLE_RATE}
  labels:
    traefik.enable: "true"
    # /api prefix is stripped before the request reaches the container
    traefik.http.middlewares.strip-api-prefix.stripprefix.prefixes: "/api"
    traefik.http.routers.back.rule: "Host(`${DOMAIN}`) && PathPrefix(`/api`)"
    traefik.http.routers.back.middlewares: "strip-api-prefix@docker"
    traefik.http.routers.back.entryPoints: "web"
    traefik.http.services.back.loadbalancer.server.port: "8080"
    traefik.http.routers.back-ssl.middlewares: "strip-api-prefix@docker"
    traefik.http.routers.back-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/api`)"
    traefik.http.routers.back-ssl.entryPoints: "websecure"
    traefik.http.routers.back-ssl.service: "back"
    traefik.http.routers.back-ssl.tls: "true"
    traefik.http.routers.back-ssl.tls.certresolver: "myresolver"
  restart: ${RESTART_POLICY}
uploader:
  image: thecodingmachine/workadventure-uploader:${VERSION}
  environment:
    - UPLOADER_URL=https://${DOMAIN}/uploader
    # AWS (S3-compatible storage for uploads)
    - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
    - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
    - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
    - AWS_BUCKET=${AWS_BUCKET}
    - AWS_URL=${AWS_URL}
    - AWS_ENDPOINT=${AWS_ENDPOINT}
    # REDIS
    - REDIS_HOST=${UPLOADER_REDIS_HOST}
    - REDIS_PORT=${UPLOADER_REDIS_PORT}
    # CHAT
    - ADMIN_API_URL=${ADMIN_API_URL}
    - ENABLE_CHAT_UPLOAD=${ENABLE_CHAT_UPLOAD}
    - UPLOAD_MAX_FILESIZE=${UPLOAD_MAX_FILESIZE}
  labels:
    traefik.enable: "true"
    traefik.http.middlewares.strip-uploader-prefix.stripprefix.prefixes: "/uploader"
    traefik.http.routers.uploader.rule: "Host(`${DOMAIN}`) && PathPrefix(`/uploader`)"
    traefik.http.routers.uploader.middlewares: "strip-uploader-prefix@docker"
    traefik.http.routers.uploader.entryPoints: "web"
    traefik.http.services.uploader.loadbalancer.server.port: "8080"
    traefik.http.routers.uploader-ssl.middlewares: "strip-uploader-prefix@docker"
    traefik.http.routers.uploader-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/uploader`)"
    traefik.http.routers.uploader-ssl.entryPoints: "websecure"
    traefik.http.routers.uploader-ssl.service: "uploader"
    traefik.http.routers.uploader-ssl.tls: "true"
    traefik.http.routers.uploader-ssl.tls.certresolver: "myresolver"
  restart: ${RESTART_POLICY}
# Favicon-fetching service used by the ICON_URL of the play container
icon:
  image: matthiasluedtke/iconserver:v3.15.0
  labels:
    traefik.enable: "true"
    traefik.http.middlewares.strip-icon-prefix.stripprefix.prefixes: "/icon"
    traefik.http.routers.icon.middlewares: "strip-icon-prefix@docker"
    traefik.http.routers.icon.rule: "Host(`${DOMAIN}`) && PathPrefix(`/icon`)"
    traefik.http.routers.icon.entryPoints: "web"
    traefik.http.services.icon.loadbalancer.server.port: "8080"
    traefik.http.routers.icon-ssl.middlewares: "strip-icon-prefix@docker"
    traefik.http.routers.icon-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/icon`)"
    traefik.http.routers.icon-ssl.entryPoints: "websecure"
    traefik.http.routers.icon-ssl.service: "icon"
    traefik.http.routers.icon-ssl.tls: "true"
    traefik.http.routers.icon-ssl.tls.certresolver: "myresolver"
  restart: ${RESTART_POLICY}
redis:
  image: redis:6
  volumes:
    # "redisdata" is a named volume — it must be declared in the top-level
    # "volumes:" section of this compose file
    - redisdata:/data
  restart: ${RESTART_POLICY}
ejabberd:
  image: workadventure/ejabberd:v1
  ports:
    - '5443:5443'
  environment:
    # Registers the admin XMPP account on first container start
    - CTL_ON_CREATE=register ${EJABBERD_USER} ${EJABBERD_DOMAIN} ${EJABBERD_PASSWORD}
    - JWT_SECRET=${EJABBERD_JWT_SECRET}
    - EJABBERD_DOMAIN=${EJABBERD_DOMAIN}
    - EJABBERD_USER=${EJABBERD_USER}
    - EJABBERD_PASSWORD=${EJABBERD_PASSWORD}
  volumes:
    - ../../xmpp/ejabberd.template.yml:/opt/ejabberd/conf/ejabberd.template.yml
  labels:
    traefik.enable: "true"
    traefik.http.middlewares.strip-ejabberd-prefix.stripprefix.prefixes: "/xmpp"
    traefik.http.routers.ejabberd.middlewares: "strip-ejabberd-prefix@docker"
    traefik.http.routers.ejabberd.rule: "Host(`${DOMAIN}`) && PathPrefix(`/xmpp`)"
    traefik.http.routers.ejabberd.entryPoints: "web"
    traefik.http.services.ejabberd.loadbalancer.server.port: "5443"
    traefik.http.routers.ejabberd-ssl.middlewares: "strip-ejabberd-prefix@docker"
    traefik.http.routers.ejabberd-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/xmpp`)"
    traefik.http.routers.ejabberd-ssl.entryPoints: "websecure"
    traefik.http.routers.ejabberd-ssl.service: "ejabberd"
    traefik.http.routers.ejabberd-ssl.tls: "true"
    traefik.http.routers.ejabberd-ssl.tls.certresolver: "myresolver"
  restart: ${RESTART_POLICY}
map-storage:
  image: thecodingmachine/workadventure-map-storage:${VERSION}
  environment:
    API_URL: back:50051
    PROMETHEUS_AUTHORIZATION_TOKEN: "$PROMETHEUS_AUTHORIZATION_TOKEN"
    AUTHENTICATION_STRATEGY: "$MAP_STORAGE_AUTHENTICATION_STRATEGY"
    ENABLE_BEARER_AUTHENTICATION: "$MAP_STORAGE_ENABLE_BEARER_AUTHENTICATION"
    ENABLE_BASIC_AUTHENTICATION: "$MAP_STORAGE_ENABLE_BASIC_AUTHENTICATION"
    ENABLE_DIGEST_AUTHENTICATION: "$MAP_STORAGE_ENABLE_DIGEST_AUTHENTICATION"
    AUTHENTICATION_USER: "$MAP_STORAGE_AUTHENTICATION_USER"
    AUTHENTICATION_PASSWORD: "$MAP_STORAGE_AUTHENTICATION_PASSWORD"
    AUTHENTICATION_TOKEN: "$MAP_STORAGE_AUTHENTICATION_TOKEN"
    AUTHENTICATION_VALIDATOR_URL: "$MAP_STORAGE_AUTHENTICATION_VALIDATOR_URL"
    SENTRY_DSN: $SENTRY_DSN_MAPSTORAGE
    SENTRY_RELEASE: $SENTRY_RELEASE
    SENTRY_ENVIRONMENT: $SENTRY_ENVIRONMENT
    SENTRY_TRACES_SAMPLE_RATE: $SENTRY_TRACES_SAMPLE_RATE
    PATH_PREFIX: "/map-storage"
  volumes:
    # "map-storage-data" is a named volume — declare it in the top-level "volumes:" section
    - map-storage-data:/maps
  labels:
    traefik.enable: "true"
    traefik.http.middlewares.strip-map-storage-prefix.stripprefix.prefixes: "/map-storage"
    traefik.http.routers.map-storage.middlewares: "strip-map-storage-prefix@docker"
    traefik.http.routers.map-storage.rule: "Host(`${DOMAIN}`) && PathPrefix(`/map-storage`)"
    traefik.http.routers.map-storage.entryPoints: "web"
    traefik.http.services.map-storage.loadbalancer.server.port: "3000"
    traefik.http.routers.map-storage-ssl.middlewares: "strip-map-storage-prefix@docker"
    traefik.http.routers.map-storage-ssl.rule: "Host(`${DOMAIN}`) && PathPrefix(`/map-storage`)"
    traefik.http.routers.map-storage-ssl.entryPoints: "websecure"
    traefik.http.routers.map-storage-ssl.service: "map-storage"
    traefik.http.routers.map-storage-ssl.tls: "true"
    traefik.http.routers.map-storage-ssl.tls.certresolver: "myresolver"
  restart: ${RESTART_POLICY}
# coturn:
# image: coturn/coturn:4.5.2
# command:
# - turnserver
# #- -c=/etc/coturn/turnserver.conf
# - --log-file=stdout
# - --external-ip=$$(detect-external-ip)
# - --listening-port=3478
# - --min-port=10000
# - --max-port=10010
# - --tls-listening-port=5349
# - --listening-ip=0.0.0.0
# - --realm=coturn.workadventure.localhost
# - --server-name=coturn.workadventure.localhost
# - --lt-cred-mech
# # Enable Coturn "REST API" to validate temporary passwords.
# #- --use-auth-secret
# #- --static-auth-secret=SomeStaticAuthSecret
# #- --userdb=/var/lib/turn/turndb
# - --user=workadventure:WorkAdventure123
# # use real-valid certificate/privatekey files
# #- --cert=/root/letsencrypt/fullchain.pem
# #- --pkey=/root/letsencrypt/privkey.pem
# network_mode: host
docker compose up -d
cd /home/myusername/docker
mkdir jitsi && cd "$_"
Open the following link then copy the version number: https://github.com/jitsi/docker-jitsi-meet/releases/latest
sudo wget https://github.com/jitsi/docker-jitsi-meet/archive/refs/tags/stable-8615.tar.gz
tar -zxvf stable-8615.tar.gz
cd docker-jitsi-meet-stable-8615
cp env.example .env
nano .env
# shellcheck disable=SC2034
# Security
#
# Set these to strong passwords to avoid intruders from impersonating a service account
# The service(s) won't start unless these are specified
# Running ./gen-passwords.sh will update .env with strong passwords
# You may skip the Jigasi and Jibri passwords if you are not using those
# DO NOT reuse passwords
#
# XMPP password for Jicofo client connections
JICOFO_AUTH_PASSWORD=
# XMPP password for JVB client connections
JVB_AUTH_PASSWORD=
# XMPP password for Jigasi MUC client connections
JIGASI_XMPP_PASSWORD=
# XMPP recorder password for Jibri client connections
JIBRI_RECORDER_PASSWORD=
# XMPP password for Jibri client connections
JIBRI_XMPP_PASSWORD=
#
# Basic configuration options
#
# Directory where all configuration will be stored
CONFIG=~/.jitsi-meet-cfg
# Exposed HTTP port
HTTP_PORT=8154
# Exposed HTTPS port
HTTPS_PORT=8155
# System time zone
TZ=Etc/UTC
# Public URL for the web service (required)
PUBLIC_URL=https://meet.example.com
# IP address of the Docker host
# See the "Running behind NAT or on a LAN environment" section in the Handbook:
# https://jitsi.github.io/handbook/docs/devops-guide/devops-guide-docker#running-behind-nat-or-on-a-lan-environment
DOCKER_HOST_ADDRESS=192.168.1.65
# Control whether the lobby feature should be enabled or not
ENABLE_LOBBY=1
# Control whether the A/V moderation should be enabled or not
#ENABLE_AV_MODERATION=1
# Show a prejoin page before entering a conference
ENABLE_PREJOIN_PAGE=1
# Enable the welcome page
ENABLE_WELCOME_PAGE=1
# Enable the close page
#ENABLE_CLOSE_PAGE=0
# Disable measuring of audio levels
DISABLE_AUDIO_LEVELS=0
# Enable noisy mic detection
ENABLE_NOISY_MIC_DETECTION=1
#
# Let's Encrypt configuration
#
# Enable Let's Encrypt certificate generation
#ENABLE_LETSENCRYPT=1
# Domain for which to generate the certificate
#LETSENCRYPT_DOMAIN=meet.example.com
# E-Mail for receiving important account notifications (mandatory)
#[email protected]
# Use the staging server (for avoiding rate limits while testing)
#LETSENCRYPT_USE_STAGING=1
#
# Etherpad integration (for document sharing)
#
# Set etherpad-lite URL in docker local network (uncomment to enable)
#ETHERPAD_URL_BASE=http://etherpad.meet.jitsi:9001
# Set etherpad-lite public URL (uncomment to enable)
#ETHERPAD_PUBLIC_URL=https://etherpad.my.domain
# Name your etherpad instance!
ETHERPAD_TITLE=Video Chat
# The default text of a pad
ETHERPAD_DEFAULT_PAD_TEXT=Welcome to Web Chat!\n\n
# Name of the skin for etherpad
ETHERPAD_SKIN_NAME=colibris
# Skin variants for etherpad
ETHERPAD_SKIN_VARIANTS=super-light-toolbar super-light-editor light-background full-width-editor
#
# Basic Jigasi configuration options (needed for SIP gateway support)
#
# SIP URI for incoming / outgoing calls
#[email protected]
# Password for the specified SIP account as a clear text
#JIGASI_SIP_PASSWORD=passw0rd
# SIP server (use the SIP account domain if in doubt)
#JIGASI_SIP_SERVER=sip2sip.info
# SIP server port
#JIGASI_SIP_PORT=5060
# SIP server transport
#JIGASI_SIP_TRANSPORT=UDP
#
# Authentication configuration (see handbook for details)
#
# Enable authentication
#ENABLE_AUTH=1
# Enable guest access
ENABLE_GUESTS=1
# Select authentication type: internal, jwt or ldap
#AUTH_TYPE=internal
# JWT authentication
#
# Application identifier
#JWT_APP_ID=my_jitsi_app_id
# Application secret known only to your token generator
#JWT_APP_SECRET=my_jitsi_app_secret
# (Optional) Set asap_accepted_issuers as a comma separated list
#JWT_ACCEPTED_ISSUERS=my_web_client,my_app_client
# (Optional) Set asap_accepted_audiences as a comma separated list
#JWT_ACCEPTED_AUDIENCES=my_server1,my_server2
# LDAP authentication (for more information see the Cyrus SASL saslauthd.conf man page)
#
# LDAP url for connection
#LDAP_URL=ldaps://ldap.DOMAIN.COM/
# LDAP base DN. Can be empty
#LDAP_BASE=DC=example,DC=domain,DC=com
# LDAP user DN. Do not specify this parameter for the anonymous bind
#LDAP_BINDDN=CN=binduser,OU=users,DC=example,DC=domain,DC=com
# LDAP user password. Do not specify this parameter for the anonymous bind
#LDAP_BINDPW=LdapUserPassw0rd
# LDAP filter. Tokens example:
# %1-9 - if the input key is [email protected], then %1 is com, %2 is domain and %3 is mail
# %s - %s is replaced by the complete service string
# %r - %r is replaced by the complete realm string
#LDAP_FILTER=(sAMAccountName=%u)
# LDAP authentication method
#LDAP_AUTH_METHOD=bind
# LDAP version
#LDAP_VERSION=3
# LDAP TLS using
#LDAP_USE_TLS=1
# List of SSL/TLS ciphers to allow
#LDAP_TLS_CIPHERS=SECURE256:SECURE128:!AES-128-CBC:!ARCFOUR-128:!CAMELLIA-128-CBC:!3DES-CBC:!CAMELLIA-128-CBC
# Require and verify server certificate
#LDAP_TLS_CHECK_PEER=1
# Path to CA cert file. Used when server certificate verify is enabled
#LDAP_TLS_CACERT_FILE=/etc/ssl/certs/ca-certificates.crt
# Path to CA certs directory. Used when server certificate verify is enabled
#LDAP_TLS_CACERT_DIR=/etc/ssl/certs
# Whether to use starttls, implies LDAPv3 and requires ldap:// instead of ldaps://
# LDAP_START_TLS=1
#
# Advanced configuration options (you generally don't need to change these)
#
# Internal XMPP domain
XMPP_DOMAIN=meet.jitsi
# Internal XMPP server
XMPP_SERVER=xmpp.meet.jitsi
# Internal XMPP server URL
XMPP_BOSH_URL_BASE=http://xmpp.meet.jitsi:5280
# Internal XMPP domain for authenticated services
XMPP_AUTH_DOMAIN=auth.meet.jitsi
# XMPP domain for the MUC
XMPP_MUC_DOMAIN=muc.meet.jitsi
# XMPP domain for the internal MUC used for jibri, jigasi and jvb pools
XMPP_INTERNAL_MUC_DOMAIN=internal-muc.meet.jitsi
# XMPP domain for unauthenticated users
XMPP_GUEST_DOMAIN=guest.meet.jitsi
# Comma separated list of domains for cross domain policy or "true" to allow all
# The PUBLIC_URL is always allowed
#XMPP_CROSS_DOMAIN=true
# Custom Prosody modules for XMPP_DOMAIN (comma separated)
XMPP_MODULES=
# Custom Prosody modules for MUC component (comma separated)
XMPP_MUC_MODULES=
# Custom Prosody modules for internal MUC component (comma separated)
XMPP_INTERNAL_MUC_MODULES=
# MUC for the JVB pool
JVB_BREWERY_MUC=jvbbrewery
# XMPP user for JVB client connections
JVB_AUTH_USER=jvb
# STUN servers used to discover the server's public IP
JVB_STUN_SERVERS=meet-jit-si-turnrelay.jitsi.net:443
# Media port for the Jitsi Videobridge
JVB_PORT=10000
# TCP Fallback for Jitsi Videobridge for when UDP isn't available
JVB_TCP_HARVESTER_DISABLED=true
JVB_TCP_PORT=4443
JVB_TCP_MAPPED_PORT=4443
# A comma separated list of APIs to enable when the JVB is started [default: none]
# See https://github.com/jitsi/jitsi-videobridge/blob/master/doc/rest.md for more information
#JVB_ENABLE_APIS=rest,colibri
# XMPP user for Jicofo client connections.
# NOTE: this option doesn't currently work due to a bug
JICOFO_AUTH_USER=focus
# Base URL of Jicofo's reservation REST API
#JICOFO_RESERVATION_REST_BASE_URL=http://reservation.example.com
# Enable Jicofo's health check REST API (http://<jicofo_base_url>:8888/about/health)
#JICOFO_ENABLE_HEALTH_CHECKS=true
# XMPP user for Jigasi MUC client connections
JIGASI_XMPP_USER=jigasi
# MUC name for the Jigasi pool
JIGASI_BREWERY_MUC=jigasibrewery
# Minimum port for media used by Jigasi
JIGASI_PORT_MIN=20000
# Maximum port for media used by Jigasi
JIGASI_PORT_MAX=20050
# Enable SDES srtp
#JIGASI_ENABLE_SDES_SRTP=1
# Keepalive method
#JIGASI_SIP_KEEP_ALIVE_METHOD=OPTIONS
# Health-check extension
#JIGASI_HEALTH_CHECK_SIP_URI=keepalive
# Health-check interval
#JIGASI_HEALTH_CHECK_INTERVAL=300000
#
# Enable Jigasi transcription
#ENABLE_TRANSCRIPTIONS=1
# Jigasi will record audio when transcriber is on [default: false]
#JIGASI_TRANSCRIBER_RECORD_AUDIO=true
# Jigasi will send transcribed text to the chat when transcriber is on [default: false]
#JIGASI_TRANSCRIBER_SEND_TXT=true
# Jigasi will post an url to the chat with transcription file [default: false]
#JIGASI_TRANSCRIBER_ADVERTISE_URL=true
# Credentials for connect to Cloud Google API from Jigasi
# Please read https://cloud.google.com/text-to-speech/docs/quickstart-protocol
# section "Before you begin" paragraph 1 to 5
# Copy the values from the json to the related env vars
#GC_PROJECT_ID=
#GC_PRIVATE_KEY_ID=
#GC_PRIVATE_KEY=
#GC_CLIENT_EMAIL=
#GC_CLIENT_ID=
#GC_CLIENT_CERT_URL=
# Enable recording
#ENABLE_RECORDING=1
# XMPP domain for the jibri recorder
XMPP_RECORDER_DOMAIN=recorder.meet.jitsi
# XMPP recorder user for Jibri client connections
JIBRI_RECORDER_USER=recorder
# Directory for recordings inside Jibri container
JIBRI_RECORDING_DIR=/config/recordings
# The finalizing script. Will run after recording is complete
#JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
# XMPP user for Jibri client connections
JIBRI_XMPP_USER=jibri
# MUC name for the Jibri pool
JIBRI_BREWERY_MUC=jibribrewery
# MUC connection timeout
JIBRI_PENDING_TIMEOUT=90
# When jibri gets a request to start a service for a room, the room
# jid will look like: [email protected]_domain
# We'll build the url for the call by transforming that into:
# https://xmpp_domain/subdomain/roomName
# So if there are any prefixes in the jid (like jitsi meet, which
# has its participants join a muc at conference.xmpp_domain) then
# list that prefix here so it can be stripped out to generate
# the call url correctly
JIBRI_STRIP_DOMAIN_JID=muc
# Directory for logs inside Jibri container
JIBRI_LOGS_DIR=/config/logs
# Configure an external TURN server
# TURN_CREDENTIALS="secret"
# TURN_HOST=turnserver.example.com
# TURN_PORT=443
# TURNS_HOST=turnserver.example.com
# TURNS_PORT=443
# Disable HTTPS: handle TLS connections outside of this setup
#DISABLE_HTTPS=1
# Enable FLoC
# Opt-In to Federated Learning of Cohorts tracking
#ENABLE_FLOC=0
# Redirect HTTP traffic to HTTPS
# Necessary for Let's Encrypt, relies on standard HTTPS port (443)
#ENABLE_HTTP_REDIRECT=1
# Send a `strict-transport-security` header to force browsers to use
# a secure and trusted connection. Recommended for production use.
# Defaults to 1 (send the header).
# ENABLE_HSTS=1
# Enable IPv6
# Provides means to disable IPv6 in environments that don't support it (get with the times, people!)
#ENABLE_IPV6=1
# Container restart policy
# Defaults to unless-stopped
RESTART_POLICY=unless-stopped
# Authenticate using external service or just focus external auth window if there is one already.
# TOKEN_AUTH_URL=https://auth.meet.example.com/{room}
./gen-passwords.sh
mkdir -p ~/.jitsi-meet-cfg/{web/letsencrypt,transcripts,prosody/config,prosody/prosody-plugins-custom,jicofo,jvb,jigasi,jibri}
docker compose up -d
cd /home/myusername/docker
mkdir vaultwarden && cd "$_"
version: "3.8"
services:
  vaultwarden:
    image: vaultwarden/server:latest
    container_name: vaultwarden
    restart: unless-stopped
    environment:
      DOMAIN: https://subdomain.DOMAIN.COM
      LOGIN_RATELIMIT_MAX_BURST: 10
      LOGIN_RATELIMIT_SECONDS: 60
      ADMIN_RATELIMIT_MAX_BURST: 10
      ADMIN_RATELIMIT_SECONDS: 60
      # NOTE(review): this token grants full admin access — use a long random
      # value, or preferably an argon2 PHC hash (see the Vaultwarden wiki)
      ADMIN_TOKEN: YourReallyStrongAdminTokenHere
      # Quoted so the YAML parser passes literal "true"/"false" strings to the container
      SENDS_ALLOWED: "true"
      EMERGENCY_ACCESS_ALLOWED: "true"
      WEB_VAULT_ENABLED: "true"
      SIGNUPS_ALLOWED: "false"
      SIGNUPS_VERIFY: "true"
      SIGNUPS_VERIFY_RESEND_TIME: 3600
      SIGNUPS_VERIFY_RESEND_LIMIT: 5
      SIGNUPS_DOMAINS_WHITELIST: DOMAIN.COM,YOURSECONDDOMAIN.COM
      SMTP_HOST: smtp.DOMAIN.COM
      SMTP_FROM: [email protected]
      SMTP_FROM_NAME: Vaultwarden
      SMTP_SECURITY: starttls # Possible values: "starttls" / "force_tls" / "off"
      SMTP_PORT: 587 # Possible values: 587 / 465
      SMTP_USERNAME: [email protected]
      SMTP_PASSWORD: emailpasswordhere
      SMTP_AUTH_MECHANISM: Plain # Possible values: "Plain" / "Login" / "Xoauth2"
    volumes:
      - ./data/:/data/
    ports:
      # Quoted to avoid YAML's base-60 integer trap on port mappings
      - "9200:80"
docker compose up -d
cd /home/myusername/docker
mkdir rocketchat
cd rocketchat
version: "3.8"
services:
  rocketchat:
    image: rocketchat/rocket.chat:latest
    container_name: rocketchat
    restart: unless-stopped
    # Retry loop: keeps trying to boot until mongo's replica set is ready
    command: >
      bash -c
      "for i in `seq 1 30`; do
      node main.js &&
      s=$$? && break || s=$$?;
      echo \"Tried $$i times. Waiting 5 secs...\";
      sleep 5;
      done; (exit $$s)"
    environment:
      PORT: 3000
      ROOT_URL: http://localhost:3000
      MONGO_URL: mongodb://mongo:27017/rocketchat
      MONGO_OPLOG_URL: mongodb://mongo:27017/local
      MAIL_URL: smtp://smtp.email
      # HTTP_PROXY: http://proxy.DOMAIN.COM
      # HTTPS_PROXY: http://proxy.DOMAIN.COM
    volumes:
      - ./uploads:/app/uploads
    ports:
      - "8152:3000"
    depends_on:
      - mongo
  mongo:
    image: mongo:4.0
    container_name: rocketchat-mongo
    restart: unless-stopped
    # mmapv1/--smallfiles are specific to mongo 4.0 — do not bump this image tag
    # without migrating the storage engine first
    command: mongod --smallfiles --oplogSize 128 --replSet rs0 --storageEngine=mmapv1
    volumes:
      - ./data/db:/data/db
      #- ./data/dump:/dump
  # this container's job is just run the command to initialize the replica set.
  # it will run the command and remove himself (it will not stay running)
  mongo-init-replica:
    image: mongo:4.0
    command: >
      bash -c
      "for i in `seq 1 30`; do
      mongo mongo/rocketchat --eval \"
      rs.initiate({
      _id: 'rs0',
      members: [ { _id: 0, host: 'localhost:27017' } ]})\" &&
      s=$$? && break || s=$$?;
      echo \"Tried $$i times. Waiting 5 secs...\";
      sleep 5;
      done; (exit $$s)"
    depends_on:
      - mongo
docker compose up -d
cd /home/myusername/docker
mkdir synapse && cd "$_"
docker run -it --rm -v ./data:/data -e SYNAPSE_SERVER_NAME=<your-intended-url> -e SYNAPSE_REPORT_STATS=no matrixdotorg/synapse:latest generate
cd /home/myusername/docker/synapse/data
line 68: public_baseurl: https://chat.DOMAIN.COM/
line 83: enabled: false
line 116: allow_public_rooms_over_federation: true
line 126: default_room_version: "6"
line 159: enable_search: true
line 1151: enable_registration: true
line 2095: smtp_host: mail.DOMAIN.COM
line 2099: smtp_port: 587
line 2104: smtp_user: "[email protected]"
line 2105: smtp_pass: "REPLACE_WITH_YOUR_EMAIL_PASSWORD"
line 2112: require_transport_security: true # enforce TLS via STARTTLS, *if the SMTP server supports it*
line 2132: notif_from: "Your Friendly %(app)s homeserver <[email protected]>"
line 2137: app_name: my_branded_matrix_server
line 2142: enable_notifs: true
line 2147: notif_for_new_users: true
line 2166: invite_client_location: https://app.element.io
version: "3.8"
services:
  synapse:
    image: matrixdotorg/synapse:latest
    container_name: synapse
    restart: unless-stopped
    volumes:
      - ./data:/data
    ports:
      - "8150:8008"
      # NOTE(review): Synapse serves plain HTTP on 8008 by default; this 443
      # mapping only works if a TLS listener is configured in homeserver.yaml — confirm
      - "8151:443"
To make yourself an admin, you will need to create a new account using ELEMENT then install sqlite3 on your linux machine.
Download the client: https://element.io/download
cd /home/myusername/docker/synapse/data
apt install sqlite3
sqlite3 homeserver.db
SELECT * FROM users;
UPDATE users SET admin = 1 WHERE name = '@myusername:myserver.com';
.quit
Debian:
apt update
apt install matrix-mirage
Arch Linux:
yay -S matrix-mirage
Windows:
https://element.io/get-started#download
IOS:
https://matrix.org/docs/projects/client/element-ios
Android:
https://matrix.org/docs/projects/client/element-android
cd /home/myusername/docker
git clone https://github.com/zulip/docker-zulip.git
cd docker-zulip
If you use zulip with Cloudflare tunnel make sure to add DISABLE_HTTPS=True in the zulip environment docker-compose.yml file
and remove SSL_CERTIFICATE_GENERATION: "self-signed"
version: "3.8"
services:
  db:
    image: zulip/zulip-postgresql:14
    container_name: zulip-postgresql
    restart: unless-stopped
    environment:
      POSTGRES_DB: zulip
      POSTGRES_USER: zulip
      # Note that you need to do a manual `ALTER ROLE` query if you
      # change this on a system after booting the postgres container
      # the first time on a host. Instructions are available in README.md.
      POSTGRES_PASSWORD: zulip!
    volumes:
      - ./db:/var/lib/postgresql/data:rw
  memcached:
    image: memcached:alpine
    container_name: zulip-memcached
    restart: unless-stopped
    command:
      - "sh"
      - "-euc"
      - |
        echo 'mech_list: plain' > "$$SASL_CONF_PATH"
        echo "zulip@$$HOSTNAME:$$MEMCACHED_PASSWORD" > "$$MEMCACHED_SASL_PWDB"
        echo "zulip@localhost:$$MEMCACHED_PASSWORD" >> "$$MEMCACHED_SASL_PWDB"
        exec memcached -S
    environment:
      SASL_CONF_PATH: /home/memcache/memcached.conf
      MEMCACHED_SASL_PWDB: /home/memcache/memcached-sasl-db
      MEMCACHED_PASSWORD: zulip!
  rabbitmq:
    image: rabbitmq:3.7.7
    container_name: zulip-rabbitmq
    restart: unless-stopped
    environment:
      RABBITMQ_DEFAULT_USER: zulip
      RABBITMQ_DEFAULT_PASS: zulip!
    volumes:
      - ./rabbitmq:/var/lib/rabbitmq:rw
  redis:
    image: redis:alpine
    container_name: zulip-redis
    restart: unless-stopped
    command:
      - "sh"
      - "-euc"
      - |
        echo "requirepass '$$REDIS_PASSWORD'" > /etc/redis.conf
        exec redis-server /etc/redis.conf
    environment:
      REDIS_PASSWORD: zulip!
    volumes:
      - ./redis:/data:rw
  zulip:
    image: zulip/docker-zulip:6.0-0
    container_name: zulip
    restart: unless-stopped
    build:
      context: .
      args:
        # Change these if you want to build zulip from a different repo/branch
        ZULIP_GIT_URL: https://github.com/zulip/zulip.git
        ZULIP_GIT_REF: 6.0
      # Set this up if you plan to use your own CA certificate bundle for building
      # CUSTOM_CA_CERTIFICATES:
    environment:
      # Fixed: must match the postgres service name above ("db") —
      # "database" does not resolve on this compose network
      DB_HOST: db
      DB_HOST_PORT: 5432
      DB_USER: zulip
      SSL_CERTIFICATE_GENERATION: self-signed
      SETTING_MEMCACHED_LOCATION: memcached:11211
      SETTING_RABBITMQ_HOST: rabbitmq
      SETTING_REDIS_HOST: redis
      # These should match RABBITMQ_DEFAULT_PASS, POSTGRES_PASSWORD,
      # MEMCACHED_PASSWORD, and REDIS_PASSWORD above.
      SECRETS_rabbitmq_password: zulip!
      SECRETS_postgres_password: zulip!
      SECRETS_memcached_password: zulip!
      SECRETS_redis_password: zulip!
      SECRETS_secret_key: zulip!
      SETTING_EXTERNAL_HOST: zulip.DOMAIN.COM
      SETTING_ZULIP_ADMINISTRATOR: [email protected]
      SETTING_EMAIL_HOST: mail.DOMAIN.COM
      SETTING_EMAIL_HOST_USER: [email protected]
      SECRETS_email_password: REPLACE_WITH_YOUR_EMAIL_PASSWORD
      SETTING_EMAIL_PORT: 587
      # It seems that the email server needs to use ssl or tls and can't be used without it.
      # Quoted so YAML passes the literal strings "False"/"True" instead of coercing to booleans
      SETTING_EMAIL_USE_SSL: "False"
      SETTING_EMAIL_USE_TLS: "True"
      ZULIP_AUTH_BACKENDS: EmailAuthBackend
      # Uncomment this when configuring the mobile push notifications service
      # SETTING_PUSH_NOTIFICATION_BOUNCER_URL: https://push.zulipchat.com
    ulimits:
      nofile:
        soft: 1000000
        hard: 1048576
    volumes:
      - ./data:/data:rw
    ports:
      - "8157:80"
      - "8158:443"
docker compose up -d
docker exec -it zulip /bin/bash
cd /home/zulip/deployments/current/
su zulip
./manage.py generate_realm_creation_link
docker exec -it zulip /bin/bash
su zulip -c '/home/zulip/deployments/current/manage.py send_test_email [email protected]'
cd /home/myusername/docker
mkdir uptime-kuma && cd "$_"
version: "3.8"
services:
  uptimekuma:
    image: louislam/uptime-kuma:latest
    container_name: uptimekuma
    restart: unless-stopped
    volumes:
      - ./data:/app/data
      # NOTE(review): mounting the Docker socket lets Uptime Kuma monitor
      # containers, but grants broad control over the Docker host — confirm you want this
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "8160:3001"
docker compose up -d
sudo su
cd /opt
git clone https://github.com/mailcow/mailcow-dockerized
cd mailcow-dockerized
./generate_config.sh
mail.DOMAIN.COM
Etc/UTC
docker compose up -d
cd /home/myusername/docker
mkdir wireguard && cd "$_"
version: "3.8"
services:
  wireguard:
    image: linuxserver/wireguard
    container_name: wireguard
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    environment:
      PUID: 1000
      PGID: 1000
      TZ: Etc/UTC
      SERVERURL: auto # (wireguard.DOMAIN.COM)(192.168.1.100)
      SERVERPORT: 51820 #optional
      PEERS: 1 #optional
      PEERDNS: auto #optional
      INTERNAL_SUBNET: 10.13.13.0 #optional
    sysctls:
      - net.ipv4.conf.all.src_valid_mark=1
    volumes:
      - ./config:/config
      - /lib/modules:/lib/modules
    ports:
      - "51820:51820/udp"
docker compose up -d
WireGuard client Windows: https://www.wireguard.com/install/
WireGuard client Linux: https://github.com/UnnoTed/wireguird
cd /home/myusername/docker/wireguard/config/peer1
docker exec -it wireguard /app/show-peer 1
version: "3.8"
services:
  wg-easy:
    image: ghcr.io/wg-easy/wg-easy:latest
    container_name: wireguard-webui
    restart: unless-stopped
    environment:
      WG_HOST: vpn.DOMAIN.COM # (vpn.DOMAIN.COM) or local server ip (192.168.1.100)
      # NOTE(review): change this default before exposing the Web UI anywhere
      PASSWORD: wireguard # When set, requires a password when logging in to the Web UI.
      #WG_PORT: 51820 # The public UDP port of your VPN server. (if disabled default is: 51820)
      WG_DEFAULT_ADDRESS: 10.13.13.x # Clients IP address range.
      #WG_DEFAULT_DNS: 1.1.1.1 # DNS server clients will use. If set to blank value, clients will not use any DNS. (if disabled default is: 1.1.1.1)
      #WG_DEVICE: eth0 # Ethernet device the wireguard traffic should be forwarded through. (if disabled default is: eth0)
      #LANG: en # Web UI language (Supports: en, ru, tr, no, pl, fr, de, ca, es) (if disabled default is: en)
      #WG_MTU: 1420 # The MTU the clients will use. (if disabled default is: null)
      #WG_ALLOWED_IPS: 192.168.15.0/24, 10.0.1.0/24 # Allowed IPs clients will use.
      #WG_PRE_UP: echo "Pre Up" > /etc/wireguard/pre-up.txt
      #WG_POST_UP: echo "Post Up" > /etc/wireguard/post-up.txt
      #WG_PRE_DOWN: echo "Pre Down" > /etc/wireguard/pre-down.txt
      #WG_POST_DOWN: echo "Post Down" > /etc/wireguard/post-down.txt
    volumes:
      - ./data:/etc/wireguard
    ports:
      - "51820:51820/udp"
      - "51821:51821/tcp"
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1
docker compose up -d
If you use Cloudflare:
Make sure to enable: gRPC Allow gRPC connections to your origin server. (In the network category)
cd /home/myusername/docker
The following command will create a folder called netbird then it will cd into netbird/infrastructure_files
REPO="https://github.com/netbirdio/netbird/"; LATEST_TAG=$(basename $(curl -fs -o/dev/null -w %{redirect_url} ${REPO}releases/latest)); echo $LATEST_TAG; git clone --depth 1 --branch $LATEST_TAG $REPO && cd netbird/infrastructure_files
Name: Netbird
Authentication Flow: default-authentication-flow (Welcome to authentik!)
Authorization Flow: default-provider-authorization-explicit-consent (Authorize Application)
Protocol Settings:
Make sure to save the Client ID because you need to add it later in setup.env
Advanced protocol settings:
Name: Netbird
Slug: netbird
Provider: Netbird
Username: netbird
Create group: disable
Expiring: disable
Make sure to save the username and password because you need to add it later in setup.env
Verify if the endpoint returns a JSON response by calling it from your browser.
https://YOUR_AUTHENTIK_HOST_AND_PORT/application/o/netbird/.well-known/openid-configuration
nano setup.env
Add the following config to setup.env
# Dashboard domain.
NETBIRD_DOMAIN="netbird.DOMAIN.COM"
# TURN server domain. e.g. turn.mydomain.com
# if not specified it will assume NETBIRD_DOMAIN
NETBIRD_TURN_DOMAIN=""
# TURN server public IP address
# required for a connection involving peers in
# the same network as the server and external peers
# If you are confused just use the main server IP where netbird is hosted on
NETBIRD_TURN_EXTERNAL_IP="192.168.1.x"
NETBIRD_AUTH_OIDC_CONFIGURATION_ENDPOINT="https://authentik.DOMAIN.COM/application/o/netbird/.well-known/openid-configuration"
# Replace IMPORT_CLIENT_ID_HERE with the authentik client ID of netbird
NETBIRD_AUTH_AUDIENCE="IMPORT_CLIENT_ID_HERE"
NETBIRD_AUTH_CLIENT_ID="IMPORT_CLIENT_ID_HERE"
NETBIRD_AUTH_DEVICE_AUTH_CLIENT_ID="IMPORT_CLIENT_ID_HERE"
NETBIRD_AUTH_DEVICE_AUTH_AUDIENCE=$NETBIRD_AUTH_AUDIENCE
NETBIRD_AUTH_DEVICE_AUTH_SCOPE="openid"
NETBIRD_AUTH_DEVICE_AUTH_USE_ID_TOKEN=false
NETBIRD_AUTH_DEVICE_AUTH_PROVIDER="none"
NETBIRD_AUTH_PKCE_REDIRECT_URL_PORTS="53000"
# Image tags
# You can force specific tags for each component; will be set to latest if empty
NETBIRD_DASHBOARD_TAG=""
NETBIRD_SIGNAL_TAG=""
NETBIRD_MANAGEMENT_TAG=""
COTURN_TAG=""
NETBIRD_AUTH_SUPPORTED_SCOPES="openid profile email offline_access api"
NETBIRD_USE_AUTH0="false"
NETBIRD_MGMT_IDP="authentik"
NETBIRD_IDP_MGMT_CLIENT_ID=$NETBIRD_AUTH_CLIENT_ID
NETBIRD_IDP_MGMT_CLIENT_SECRET=""
NETBIRD_IDP_MGMT_EXTRA_USERNAME="netbird"
NETBIRD_IDP_MGMT_EXTRA_PASSWORD="IMPORT_SERVICE_ACCOUNT_PASSWORD_HERE"
NETBIRD_DISABLE_LETSENCRYPT=true
NETBIRD_LETSENCRYPT_EMAIL=""
NETBIRD_DISABLE_ANONYMOUS_METRICS=false
NETBIRD_MGMT_DNS_DOMAIN=netbird.selfhosted
NETBIRD_MGMT_API_PORT=443
NETBIRD_SIGNAL_PORT=443
cd /home/myusername/docker/traefik-crowdsec/traefik-data
nano fileConfig.yml
http:
routers:
##########################################################
###======================ROUTERS======================###
### NetBird - router ###
netbird:
entryPoints:
- https
- http
rule: "Host(`netbird.DOMAIN.COM`)"
service: netbird
# NetBird API - router
netbird-api:
rule: "Host(`netbird.DOMAIN.COM`) && PathPrefix(`/api`)"
service: netbird-api
# NetBird Management - router
netbird-management:
rule: "Host(`netbird.DOMAIN.COM`) && PathPrefix(`/management.ManagementService/`)"
service: netbird-management
# NetBird Signal - Router
netbird-signal:
rule: "Host(`netbird.DOMAIN.COM`) && PathPrefix(`/signalexchange.SignalExchange/`)"
service: netbird-signal
# NetBird Relay - Router
netbird-relay:
rule: "Host(`netbird.DOMAIN.COM`) && PathPrefix(`/relay`)"
service: netbird-relay
##########################################################
###======================SERVICES======================###
services:
### NetBird - service ###
netbird:
loadBalancer:
servers:
- url: http://192.168.1.x:9180
# NetBird API - service
netbird-api:
loadBalancer:
servers:
- url: http://192.168.1.x:9184
# NetBird Management - service
netbird-management:
loadBalancer:
servers:
- url: h2c://192.168.1.x:9184
# NetBird Signal - service
netbird-signal:
loadBalancer:
servers:
- url: h2c://192.168.1.x:9182
# NetBird Relay - service
netbird-relay:
loadBalancer:
servers:
- url: http://192.168.1.x:33080
./configure.sh
cd artifacts && docker compose up -d
The following docker-compose.yml file is just an example for use with an existing reverse proxy. You can replace the existing docker-compose.yml in the artifacts folder with this one, but make sure you change DOMAIN.COM to your domain and replace PROVIDER_CLIENT_ID_HERE.
services:
dashboard:
image: netbirdio/dashboard:latest
container_name: netbird-dashboard
restart: unless-stopped
ports:
- 9180:80
# - 443:443
environment:
# Endpoints
NETBIRD_MGMT_API_ENDPOINT: https://netbird.DOMAIN.COM:443
NETBIRD_MGMT_GRPC_API_ENDPOINT: https://netbird.DOMAIN.COM:443
# OIDC
AUTH_AUDIENCE: PROVIDER_CLIENT_ID_HERE
AUTH_CLIENT_ID: PROVIDER_CLIENT_ID_HERE
AUTH_CLIENT_SECRET:
AUTH_AUTHORITY: https://authentik.DOMAIN.COM/application/o/netbird/
USE_AUTH0: false
AUTH_SUPPORTED_SCOPES: openid profile email offline_access api
AUTH_REDIRECT_URI:
AUTH_SILENT_REDIRECT_URI:
NETBIRD_TOKEN_SOURCE: accessToken
# SSL
NGINX_SSL_PORT: 443
# Letsencrypt
# - LETSENCRYPT_DOMAIN=
# - LETSENCRYPT_EMAIL=
# volumes:
# - ./netbird_container/netbird-letsencrypt:/etc/letsencrypt/
logging:
driver: "json-file"
options:
max-size: "500m"
max-file: "2"
networks:
- proxy
signal:
image: netbirdio/signal:latest
container_name: netbird-signal
restart: unless-stopped
volumes:
- ./netbird_container/netbird-signal:/var/lib/netbird
ports:
- 9182:80
# # port and command for Let's Encrypt validation
# - 443:443
# command: ["--letsencrypt-domain", "", "--log-file", "console"]
logging:
driver: "json-file"
options:
max-size: "500m"
max-file: "2"
networks:
- proxy
management:
image: netbirdio/management:latest
container_name: netbird-management
restart: unless-stopped
depends_on:
- dashboard
volumes:
- ./netbird_container/netbird-mgmt:/var/lib/netbird
# - ./netbird_container/netbird-letsencrypt:/etc/letsencrypt:ro
- ./management.json:/etc/netbird/management.json
ports:
- 9184:443 #API port
# # command for Let's Encrypt validation without dashboard container
# command: ["--letsencrypt-domain", "", "--log-file", "console"]
command: [
"--port", "443",
"--log-file", "console",
"--log-level", "info",
"--disable-anonymous-metrics=false",
"--single-account-mode-domain=netbird.DOMAIN.COM",
"--dns-domain=netbird.selfhosted"
]
logging:
driver: "json-file"
options:
max-size: "500m"
max-file: "2"
networks:
- proxy
coturn:
image: coturn/coturn:latest
container_name: netbird-coturn
restart: unless-stopped
#domainname: netbird.DOMAIN.COM # only needed when TLS is enabled
volumes:
- ./turnserver.conf:/etc/turnserver.conf:ro
# - ./privkey.pem:/etc/coturn/private/privkey.pem:ro
# - ./cert.pem:/etc/coturn/certs/cert.pem:ro
network_mode: host
command:
- -c /etc/turnserver.conf
logging:
driver: "json-file"
options:
max-size: "500m"
max-file: "2"
relay:
image: netbirdio/relay:latest
container_name: netbird-relay
restart: unless-stopped
environment:
NB_LOG_LEVEL: info
NB_LISTEN_ADDRESS: :33080
NB_EXPOSED_ADDRESS: rels://netbird.DOMAIN.COM:443/relay
NB_AUTH_SECRET: XtsT4YH4g99nehxK39X8zcjlay2e1MMPWm6GKxguXJs # generate a new key with: openssl rand -base64 32 | sed 's/=//g'
ports:
- 33080:33080
logging:
driver: "json-file"
options:
max-size: "500m"
max-file: "2"
networks:
- proxy
networks:
proxy:
external: true
"Relay": {
"Addresses": ["rels://netbird.DOMAIN.COM:443/relay"],
"CredentialsTTL": "24h",
"Secret": "XtsT4YH4g99nehxK39X8zcjlay2e1MMPWm6GKxguXJs"
},
cd /home/myusername/docker
mkdir netbird_client && cd "$_"
When the netbird-client network is set to MACVLAN (LAN), the server host IP will not be accessible
When the netbird-client network is set to the default Docker network, it will have full access
version: "3.8"
services:
netbird:
image: netbirdio/netbird:latest
container_name: netbird-client
restart: unless-stopped
cap_add:
- NET_ADMIN
environment:
NB_SETUP_KEY: IMPORT_SETUP_KEY_HERE
NB_MANAGEMENT_URL: https://netbird.DOMAIN.COM:443
volumes:
- ./data:/etc/netbird
networks:
- proxy
networks:
proxy:
external: true
Download the client here: https://pkgs.netbird.io/windows/x64
docker compose pull
docker compose up -d --force-recreate
verify netbird version
docker inspect IMAGE_ID | grep version
cd /home/myusername/docker
mkdir seafile && cd "$_" && mkdir onlyoffice && touch onlyoffice/local.conf
version: "3.8"
services:
  seafile-db:
    image: mariadb:latest
    container_name: seafile_mariadb
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: seafile!
      # Quoted: the Compose spec requires boolean-looking environment values
      # to be strings; a bare `true` is a YAML boolean.
      MYSQL_LOG_CONSOLE: "true"
    volumes:
      - ./db:/var/lib/mysql
    ports:
      - "9308:3306"
    networks:
      - seafile-net
  seafile:
    image: seafileltd/seafile-mc:11.0-latest
    container_name: seafile
    restart: unless-stopped
    environment:
      DB_HOST: seafile-db
      DB_ROOT_PASSWD: seafile!
      TIME_ZONE: Etc/UTC
      SEAFILE_ADMIN_EMAIL: [email protected]
      SEAFILE_ADMIN_PASSWORD: seafile
      SEAFILE_SERVER_LETSENCRYPT: "false"
      SEAFILE_SERVER_HOSTNAME: seafile.DOMAIN.COM
    volumes:
      - ./data:/shared
    ports:
      - "8240:80"
    depends_on:
      - seafile-db
      - seafile-memcached
    networks:
      - seafile-net
  seafile-memcached:
    image: memcached:1.6
    container_name: seafile_memcached
    restart: unless-stopped
    command: memcached -m 256
    networks:
      - seafile-net
  onlyoffice-postgresql:
    image: postgres:12
    container_name: seafile_onlyoffice_postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: onlyoffice
      POSTGRES_USER: onlyoffice
      POSTGRES_HOST_AUTH_METHOD: trust
    volumes:
      - ./onlyoffice/db:/var/lib/postgresql
    ports:
      - "9310:5432"
    networks:
      - seafile-net
  onlyoffice-documentserver:
    image: onlyoffice/documentserver:latest
    container_name: seafile_onlyoffice_ds
    restart: unless-stopped
    environment:
      DB_TYPE: postgres
      DB_HOST: onlyoffice-postgresql
      DB_PORT: 5432
      DB_USER: onlyoffice
      DB_NAME: onlyoffice
      AMQP_URI: amqp://guest:guest@onlyoffice-rabbitmq
      # JSON Web Token validation (shared secret must match
      # ONLYOFFICE_JWT_SECRET in seahub_settings.py below).
      JWT_ENABLED: "true"
      JWT_SECRET: onlyoffice
      JWT_HEADER: Authorization
      JWT_IN_BODY: "true"
    volumes:
      # Optional: see https://manual.seafile.com/deploy/only_office/
      #- ./onlyoffice/local.json:/etc/onlyoffice/documentserver/local.json
      #- ./onlyoffice/local-production-linux.json:/etc/onlyoffice/documentserver/local-production-linux.json
      - ./onlyoffice/data:/var/www/onlyoffice/Data
      - ./onlyoffice/lib:/var/lib/onlyoffice
      - ./onlyoffice/logs:/var/log/onlyoffice
    ports:
      - "8243:80"
    networks:
      - seafile-net
  onlyoffice-rabbitmq:
    image: rabbitmq:latest
    container_name: seafile_onlyoffice_rabbitmq
    restart: unless-stopped
    volumes:
      - ./onlyoffice/rabbitmq/data:/var/lib/rabbitmq/
      - ./onlyoffice/rabbitmq/log:/var/log/rabbitmq
    networks:
      - seafile-net
# NOTE(review): declared external — create it first with
# `docker network create seafile-net` or compose will fail to start.
networks:
  seafile-net:
    external: true
docker compose up -d
Default login
Username:[email protected]
Password:seafile
At the dashboard:
System admin -> Users -> Add User
Admin -> Add Admin
Add the following config at the bottom to seahub_settings.py
# OnlyOffice
ENABLE_ONLYOFFICE = True
VERIFY_ONLYOFFICE_CERTIFICATE = False
ONLYOFFICE_APIJS_URL = 'https://YOUR_ONLYOFFICE_DOMAIN_HERE/web-apps/apps/api/documents/api.js'
ONLYOFFICE_FILE_EXTENSION = ('doc', 'docx', 'ppt', 'pptx', 'xls', 'xlsx', 'odt', 'fodt', 'odp', 'fodp', 'ods', 'fods')
ONLYOFFICE_EDIT_FILE_EXTENSION = ('docx', 'pptx', 'xlsx')
# "Force Save" to allow users to save files when pressing the save button on the OnlyOffice file edit page.
ONLYOFFICE_FORCE_SAVE = True
# JWT secret can be used to secure your OnlyOffice server so other people will not be able to use it.
ONLYOFFICE_JWT_SECRET = 'onlyoffice'
Replace the following config at seafile.nginx.conf
# -*- mode: nginx -*-
# Auto generated at 02/12/2024 14:23:06
# Required for only office document server
map $http_x_forwarded_proto $the_scheme {
default $http_x_forwarded_proto;
"" $scheme;
}
map $http_x_forwarded_host $the_host {
default $http_x_forwarded_host;
"" $host;
}
map $http_upgrade $proxy_connection {
default upgrade;
"" close;
}
server {
listen 80;
server_name seafile.DOMAIN.COM;
client_max_body_size 10m;
location / {
proxy_pass http://127.0.0.1:8000/;
proxy_read_timeout 310s;
proxy_set_header Host $http_host;
proxy_set_header Forwarded "for=$remote_addr;proto=$scheme";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Connection "";
proxy_http_version 1.1;
client_max_body_size 0;
access_log /var/log/nginx/seahub.access.log seafileformat;
error_log /var/log/nginx/seahub.error.log;
}
location /seafhttp {
rewrite ^/seafhttp(.*)$ $1 break;
proxy_pass http://127.0.0.1:8082;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size 0;
proxy_connect_timeout 36000s;
proxy_read_timeout 36000s;
proxy_request_buffering off;
access_log /var/log/nginx/seafhttp.access.log seafileformat;
error_log /var/log/nginx/seafhttp.error.log;
}
location /onlyofficeds/ {
# THIS ONE IS IMPORTANT ! - Trailing slash !
proxy_pass http://127.0.0.1:8243/;
proxy_http_version 1.1;
client_max_body_size 100M; # Limit Document size to 100MB
proxy_read_timeout 3600s;
proxy_connect_timeout 3600s;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
# THIS ONE IS IMPORTANT ! - Subfolder and NO trailing slash !
proxy_set_header X-Forwarded-Host $the_host/onlyofficeds;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Forwarded "for=$remote_addr;proto=https";
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
access_log /var/log/nginx/onlyoffice.access.log seafileformat;
error_log /var/log/nginx/onlyoffice.error.log;
}
location /notification/ping {
proxy_pass http://127.0.0.1:8083/ping;
access_log /var/log/nginx/notification.access.log seafileformat;
error_log /var/log/nginx/notification.error.log;
}
location /notification {
proxy_pass http://127.0.0.1:8083/;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
access_log /var/log/nginx/notification.access.log seafileformat;
error_log /var/log/nginx/notification.error.log;
}
location /seafdav {
proxy_pass http://127.0.0.1:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 1200s;
client_max_body_size 0;
access_log /var/log/nginx/seafdav.access.log seafileformat;
error_log /var/log/nginx/seafdav.error.log;
}
location /media {
root /opt/seafile/seafile-server-latest/seahub;
}
location ~ /\.git {
deny all;
}
# Optional error pages remove if not needed
error_page 403 /forbidden.html;
location = /forbidden.html {
root /var/www/html;
internal;
}
# Optional error pages remove if not needed
error_page 502 /maintenance.html;
error_page 504 /maintenance.html;
error_page 500 /maintenance.html;
location = /maintenance.html {
root /usr/share/nginx/html;
internal;
}
}
cd /home/myusername/docker
mkdir -p grafana/{prometheus,fluent,loki,tempo} && cd grafana
version: "3.8"
services:
prometheus:
image: prom/prometheus:latest
container_name: prometheus
hostname: prometheus
# restart: unless-stopped
user: root
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=240h'
- '--web.enable-lifecycle'
volumes:
- ./prometheus/data:/prometheus
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
ports:
- 8320:9090
labels:
org.label-schema.group: "monitoring"
networks:
- management
depends_on:
- cadvisor
# METRICS EXPORTER OF DOCKER CONTAINERS
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
container_name: cadvisor
hostname: cadvisor
# restart: unless-stopped
privileged: true
devices:
- /dev/kmsg:/dev/kmsg
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker:/var/lib/docker:ro
- /cgroup:/cgroup:ro
ports:
- 8323:3000
labels:
org.label-schema.group: "monitoring"
networks:
- management
# METRICS EXPORTER OF HOST LINUX MACHINE
nodeexporter:
image: prom/node-exporter:latest
container_name: nodeexporter
hostname: nodeexporter
# restart: unless-stopped
command:
- '--path.procfs=/host/proc'
- '--path.rootfs=/rootfs'
- '--path.sysfs=/host/sys'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
- '--collector.systemd'
- '--collector.processes'
volumes:
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /:/rootfs:ro
ports:
- 8322:9100
labels:
org.label-schema.group: "monitoring"
networks:
- management
# LOG FORWARDER
fluentbit:
image: fluent/fluent-bit:latest
container_name: logsvc
hostname: logsvc
# restart: unless-stopped
volumes:
- ./fluent:/config
- ./fluent/logs:/logs
command:
- "-c"
- "/config/fluent.yml"
ports:
- 8325:9880
- 8326:4318
networks:
- management
# LOG COLLECTOR LIKE PROMETHEUS
loki:
image: grafana/loki:latest
container_name: loki
hostname: loki
# restart: unless-stopped
ports:
- 8324:3100
volumes:
- ./loki/:/etc/loki/
command: -config.file=/etc/loki/config.yml
networks:
- management
# TEMPORARY STORAGE BACKEND FOR LOKI
minio:
image: minio/minio:latest
container_name: minio
hostname: minio
user: root
environment:
MINIO_ROOT_USER: tempo
MINIO_ROOT_PASSWORD: minio!!
volumes:
- ./minio/buckets:/data
command: server /data --console-address ':9001'
networks:
- management
# TRACING BACKEND FOR GRAFANA
tempo:
image: grafana/tempo:latest
container_name: tempo
hostname: tempo
# restart: unless-stopped
command: "-target=scalable-single-binary -config.file=/etc/tempo.yaml"
volumes:
- ./tempo/tempo.yml:/etc/tempo.yaml
ports:
- 8327:3200
networks:
- management
depends_on:
- minio
# METRICS MAIN GUI DASHBOARD AND VISUALIZER
grafana:
image: grafana/grafana:latest
container_name: grafana
hostname: grafana
restart: unless-stopped
user: root
environment:
GF_SECURITY_ADMIN_USER: admin
GF_SECURITY_ADMIN_PASSWORD: admin
GF_USERS_ALLOW_SIGN_UP: false
GF_SMTP_ENABLED: true
GF_SMTP_HOST: mail.DOMAIN.COM:587
GF_SMTP_USER: [email protected]
GF_SMTP_PASSWORD: REPLACE_WITH_YOUR_EMAIL_PASSWORD
GF_FEATURE_TOGGLES_ENABLE: traceqlEditor
GF_INSTALL_PLUGINS: grafana-piechart-panel,marcusolsson-json-datasource,marcusolsson-dynamictext-panel
volumes:
- ./data:/var/lib/grafana
- ./datasources:/etc/grafana/provisioning/datasources
ports:
- 8321:3000
labels:
org.label-schema.group: "monitoring"
networks:
- management
networks:
management:
external: true
nano datasources.yml
apiVersion: 1
datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
    jsonData:
      httpMethod: GET
  - name: Tempo
    type: tempo
    uid: tempo
    access: proxy
    orgId: 1
    url: http://tempo:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: false
    # NOTE: the original repeated `apiVersion: 1` inside this entry;
    # apiVersion is a file-level key and belongs only at the top of the
    # provisioning document, so it was removed here.
    jsonData:
      httpMethod: GET
      serviceMap:
        datasourceUid: prometheus
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    jsonData:
      maxLines: 1000
cd prometheus
nano prometheus.yml
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: [ 'localhost:9090' ]
- job_name: 'metrics_fluentbit'
metrics_path: /api/v1/metrics/prometheus
static_configs:
- targets: [ 'logsvc:2020' ]
- job_name: 'traefik'
static_configs:
- targets: [ 'traefik:8193' ]
- job_name: 'loki'
static_configs:
- targets: [ 'loki:3100' ]
- job_name: 'tempo'
static_configs:
- targets:
- 'tempo:3200'
- job_name: 'metrics_node_exporter'
static_configs:
- targets: ['nodeexporter:9100']
- job_name: 'metrics_cadvisor'
static_configs:
- targets: ['cadvisor:8080']
cd tempo
nano tempo.yml
server:
http_listen_port: 3200
distributor:
receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
otlp:
protocols:
http:
grpc:
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
compactor:
compaction:
block_retention: 1h # overall Tempo trace retention. set for demo purposes
memberlist:
abort_if_cluster_join_fails: false
bind_port: 7946
join_members:
- tempo:7946
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /tmp/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
storage:
trace:
backend: s3 # backend configuration to use
wal:
path: /tmp/tempo/wal # where to store the the wal locally
s3:
bucket: tempo # how to store data in s3
endpoint: minio:9000
access_key: tempo
secret_key: supersecret
insecure: true
# For using AWS, select the appropriate regional endpoint and region
# endpoint: s3.dualstack.us-west-2.amazonaws.com
region: us-west-1
querier:
frontend_worker:
frontend_address: tempo:9095
overrides:
metrics_generator_processors: ['service-graphs', 'span-metrics']
cd fluent
nano fluent.yml
# setting up a local environment variable
env:
flush_interval: 1
http_addr: 0.0.0.0
http_port: 2020
# service configuration
service:
flush: ${flush_interval}
log_level: info
http_server: "on"
http_listen: ${http_addr}
http_port: ${http_port}
parsers_file: /config/parsers.conf
pipeline:
inputs:
- name: http
listen: 0.0.0.0
port: 9880
buffer_max_size: 4M
buffer_chunk_size: 512K
successful_response_code: 201
- name: tail
path: /logs/traefik.log
path_key: filepath
skip_empty_lines: true
tag: traefik_logs
- name: tail
path: /logs/traefik_access.log
path_key: filepath
skip_empty_lines: true
tag: traefik_access_logs
- name: opentelemetry
raw_traces: false
listen: 0.0.0.0
port: 4318
buffer_max_size: 4M
buffer_chunk_size: 512K
successful_response_code: 201
# Fluent Bit will listen on port 4318 for data.
# You can now send telemetry data to the endpoints
# /v1/metrics
# /v1/traces
# /v1/logs
filters:
- name: expect
match: "*"
key_val_is_not_null: traceId
action: result_key
- name: expect
match: "*"
key_val_is_not_null: sessionId
action: result_key
- name: modify
match: "*"
rename: timestamp time
- name: modify
match: "minio*"
add: service minio
- name: modify
match: "svc.*"
add: service api
- name: modify
match: "traefik_logs"
add: node metrics
- name: modify
match: "traefik_logs"
add: service traefik
- name: modify
match: "traefik_access_logs"
add: service traefik
- name: modify
match: "traefik_access_logs"
add: node metrics
- name: modify
match: "traefik_access_logs"
add: accesslog true
- name: parser
match: traefik_logs
key_name: log
parser: json
preserve_key: true
reserve_data: true
- name: parser
match: traefik_access_logs
key_name: log
parser: json
preserve_key: true
reserve_data: true
outputs:
# - name: stdout
# match: "*"
- name: loki
match: "*"
host: loki
labels: job=pi
port: 3100
line_format: json
- name: opentelemetry
match: opentelemetry*
host: tempo
port: 4318
# metrics_uri: "/"
# logs_uri: "/"
# traces_uri: "/"
http_user: ""
http_Passwd: ""
log_response_payload: true
nano parsers.conf
[PARSER]
Name json
Format json
Time_Key timestamp
Time_Keep On
cd loki
nano config.yml
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
schema_config:
configs:
- from: 2021-08-01
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: index_
period: 24h
common:
path_prefix: /loki
replication_factor: 1
storage:
s3:
endpoint: minio:9000
insecure: true
bucketnames: loki-data
access_key_id: tempo
secret_access_key: supersecret
s3forcepathstyle: true
ring:
kvstore:
store: memberlist
ruler:
storage:
s3:
bucketnames: loki-ruler
alertmanager_url: http://localhost:9093
docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
nano /etc/docker/daemon.json
{
"log-driver": "loki",
"log-opts": {
"loki-url": "http://localhost:3100/loki/api/v1/push",
"loki-batch-size": "400"
}
}
{
"default-runtime": "nvidia",
"runtimes": {
"nvidia": {
"path": "/usr/bin/nvidia-container-runtime",
"runtimeArgs": []
}
},
"log-driver": "loki",
"log-opts": {
"loki-url": "http://localhost:3100/loki/api/v1/push",
"loki-batch-size": "400"
}
}
systemctl restart docker
docker compose up -d
Default login
Username:admin
Password:admin
Go to: Home -> Connections -> Data sources
Click on Add new data source and click Prometheus
Set Prometheus server URL * to: http://prometheus:9090
Download and import dashboards at: Home -> Dashboards -> Import dashboard
Make sure you have port 6060 open at the crowdsec docker container
cd /home/myusername/docker/grafana/prometheus
nano prometheus.yml
- job_name: "crowdsec"
static_configs:
- targets: ['YOURSERVERIP:6060']
docker restart prometheus
Download and import dashboards at: Home -> Dashboards -> Import dashboard
cd /home/myusername/docker
mkdir librespeed_exporter && cd "$_"
version: "3.8"
services:
librespeed_exporter:
image: brendonmatheson/prometheus-librespeed-exporter:1.0.0
container_name: librespeed-exporter
restart: unless-stopped
hostname: librespeedexporter
ports:
- 8162:9469
docker compose up -d
cd /home/myusername/docker/grafana/prometheus
nano prometheus.yml
- job_name: "metrics_librespeed_exporter"
metrics_path: /probe
params:
script: [librespeed]
static_configs:
- targets:
- librespeedexporter:9469
scrape_interval: 360m
scrape_timeout: 2m
docker restart prometheus
cd /home/myusername/docker/pihole
mkdir pihole_exporter && cd "$_"
version: "3.8"
services:
pihole-exporter:
image: ekofr/pihole-exporter:latest
container_name: pihole-exporter
hostname: piholeexporter
environment:
PIHOLE_PROTOCOL: http
PIHOLE_HOSTNAME: pihole01
PIHOLE_PASSWORD: pihole
PIHOLE_PORT: 80
PORT: 9617
ports:
- 8125:9617
# https://github.com/eko/pihole-exporter
docker compose up -d
cd /home/myusername/docker/grafana/prometheus
- job_name: "metrics_pihole_exporter"
static_configs:
- targets: ['piholeexporter:9617']
docker restart prometheus
cd /home/myusername/docker
mkdir librespeed && cd "$_"
version: "3.8"
services:
  librespeed:
    image: lscr.io/linuxserver/librespeed:latest
    container_name: librespeed
    restart: unless-stopped
    environment:
      PUID: 1000
      PGID: 1000
      TZ: Etc/UTC
      PASSWORD: librespeed
      # optional; quoted so Compose receives the literal string "false"
      # instead of a YAML boolean.
      CUSTOM_RESULTS: "false"
      # DB_TYPE: sqlite # optional
      # DB_NAME: librespeed # optional
      # DB_HOSTNAME: db # optional
      # DB_USERNAME: librespeed # optional
      # DB_PASSWORD: librespeed! # optional
      # IPINFO_APIKEY: ACCESS_TOKEN # optional
    volumes:
      - ./config:/config
    ports:
      - "8161:80"
docker compose up -d
cd /home/myusername/docker
mkdir pufferpanel && cd "$_"
version: "3.8"
services:
pufferpanel:
image: pufferpanel/pufferpanel:latest
container_name: pufferpanel
restart: unless-stopped
volumes:
- ./config:/etc/pufferpanel
- ./servers:/var/lib/pufferpanel
ports:
- 8185:8080
- 8186:5657
- 25565:25565 # Example port used for a minecraft server
docker compose up -d
docker exec -it pufferpanel /pufferpanel/pufferpanel user add
We highly recommend creating a custom Docker network VLAN for this service. The command below is our recommendation for networking
docker network create --subnet=10.0.31.0/24 --gateway=10.0.31.1 --ip-range=10.0.31.0/24 --driver=bridge --attachable=true gamelan
cd /home/myusername/docker
mkdir pterodactyl && cd "$_"
mkdir -p /wings/data/lib /wings/data/tmp/pterodactyl
version: "3.8"
services:
db:
image: mariadb:latest
container_name: pterodactyl_mariadb
restart: unless-stopped
command: --default-authentication-plugin=mysql_native_password
volumes:
- ./panel/db:/var/lib/mysql
environment:
MYSQL_DATABASE: panel
MYSQL_USER: pterodactyl
MYSQL_PASSWORD: pterodactyl!
MYSQL_ROOT_PASSWORD: pterodactyl!!
networks:
- gamelan
cache:
image: redis:alpine
container_name: pterodactyl_redis
restart: unless-stopped
networks:
- gamelan
panel:
image: ghcr.io/pterodactyl/panel:latest
container_name: pterodactyl_panel
restart: unless-stopped
stdin_open: true
tty: true
ports:
- 8180:80
# - 8184:443 # OPTIONAL
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/containers:/var/lib/docker/containers
- /etc/ssl/certs:/etc/ssl/certs:ro
- ./panel/conf/certs:/etc/letsencrypt #
- ./panel/conf/etc:/etc/pterodactyl
- ./panel/conf/nginx:/etc/nginx/http.d #
- ./panel/data/var:/app/var #
- ./panel/data/logs/panel:/app/storage/logs #
- ./panel/data/logs/panel:/var/log/panel/logs
- ./panel/data/panel:/var/www/html
- ./panel/data/lib:/var/lib/pterodactyl
environment:
RECAPTCHA_ENABLED: false
TZ: Etc/UTC
APP_TIMEZONE: Etc/UTC
APP_ENV: production
APP_ENVIRONMENT_ONLY: false
APP_URL: https://gpanel.DOMAIN.COM
APP_SERVICE_AUTHOR: [email protected]
MAIL_FROM: [email protected]
MAIL_DRIVER: smtp
MAIL_HOST: mail.DOMAIN.COM
MAIL_PORT: 587
MAIL_USERNAME: [email protected]
MAIL_PASSWORD: REPLACE_WITH_YOUR_EMAIL_PASSWORD
MAIL_ENCRYPTION: false
TRUSTED_PROXIES: "*"
PTERODACTYL_TELEMETRY_ENABLED: false
DB_HOST: db
DB_PORT: 3306
DB_PASSWORD: pterodactyl!
CACHE_DRIVER: redis
SESSION_DRIVER: redis
QUEUE_DRIVER: redis
REDIS_HOST: cache
# LE_EMAIL: "" # Uncomment if you want to use Let's Encrypt to generate an SSL certificate for the Panel.
networks:
- gamelan
wings:
image: ghcr.io/pterodactyl/wings:latest
container_name: pterodactyl_wings
restart: unless-stopped
ports:
- 8181:8080
- 8182:2022 # SFTP
# - 8183:443 # OPTIONAL
stdin_open: true
tty: true
environment:
TZ: Etc/UTC
APP_TIMEZONE: Etc/UTC
WINGS_UID: 1000
WINGS_GID: 1000
WINGS_USERNAME: pterodactyl
volumes:
- /var/run/docker.sock:/var/run/docker.sock #
- /var/lib/docker/containers:/var/lib/docker/containers # - ./panel/data/containers:/var/lib/docker/containers
- /etc/ssl/certs:/etc/ssl/certs:ro #
- ./wings/conf/etc:/etc/pterodactyl #
- /wings/data/lib:/wings/data/lib #
- ./wings/data/logs/wings:/var/log/pterodactyl #
- /wings/data/tmp/pterodactyl:/wings/data/tmp/pterodactyl #
# - ./wings/data/lib/wings.db:/wings/data/lib/wings.db
networks:
- gamelan
networks:
gamelan:
external: true
docker compose up -d
cd /home/myusername/docker/pterodactyl
docker compose run --rm panel php artisan p:user:make [email protected] --username=admin --name-first=admin --name-last=user --password=admin --admin=1 --no-password
Go to: Admin > Locations
Click on: Create new
Name it: home
Go to: Admin --> Nodes
Click on: Create new
Insert the following settings then click on Create Node:
Settings
Name: gpanel-node01.DOMAIN.COM
Location: home
FQDN: gpanel-node01.DOMAIN.COM
Communicate Over SSL: Use SSL Connection
Behind Proxy: Behind Proxy
Allocation Limits
Total Memory: 10240
Overallocate: 0
Disk Space: 102400
Overallocate: 0
General Configuration
Daemon Port: 443
Daemon SFTP Port 2022
configuration
cd /home/myusername/docker/pterodactyl
nano wings/conf/etc/config.yml
debug: false
app_name: Pterodactyl
uuid: UUID_ID_HERE
token_id: TOKEN_ID_HERE
token: TOKEN_HERE
api:
host: 0.0.0.0
port: 8080
ssl:
enabled: false
cert: /etc/letsencrypt/live/gpanelnode.DOMAIN.COM/fullchain.pem
key: /etc/letsencrypt/live/gpanelnode.DOMAIN.COM/privkey.pem
disable_remote_download: false
upload_limit: 100
trusted_proxies: []
system:
root_directory: /wings/data/lib
log_directory: /var/log/pterodactyl
data: /wings/data/lib/volumes
archive_directory: /wings/data/lib/archives
backup_directory: /wings/data/lib/backups
tmp_directory: /wings/data/tmp/pterodactyl
username: pterodactyl
timezone: Etc/UTC
user:
rootless:
enabled: false
container_uid: 0
container_gid: 0
uid: 1000
gid: 1000
disk_check_interval: 150
activity_send_interval: 60
activity_send_count: 100
check_permissions_on_boot: true
enable_log_rotate: true
websocket_log_count: 150
sftp:
bind_address: 0.0.0.0
bind_port: 2022
read_only: false
crash_detection:
enabled: true
detect_clean_exit_as_crash: true
timeout: 60
backups:
write_limit: 0
compression_level: best_speed
transfers:
download_limit: 0
openat_mode: auto
docker:
network:
interface: 10.0.31.1
dns:
- 1.1.1.1
- 1.0.0.1
name: gamelan
ispn: false
driver: bridge
network_mode: gamelan
is_internal: false
enable_icc: true
network_mtu: 1500
interfaces:
v4:
subnet: 10.0.31.0/24
gateway: 10.0.31.1
v6:
subnet: fdba:17c8:6c94::/64
gateway: fdba:17c8:6c94::1011
domainname: ""
registries: {}
tmpfs_size: 100
container_pid_limit: 512
installer_limits:
memory: 1024
cpu: 100
overhead:
override: false
default_multiplier: 1.05
multipliers: {}
use_performant_inspect: true
userns_mode: ""
log_config:
type: local
config:
compress: "false"
max-file: "1"
max-size: 5m
mode: non-blocking
throttles:
enabled: true
lines: 2000
line_reset_interval: 100
remote: https://gpanel.DOMAIN.COM
remote_query:
timeout: 30
boot_servers_per_page: 50
allowed_mounts: []
allowed_origins:
- '*'
allow_cors_private_network: true
ignore_panel_config_updates: false
docker compose restart wings
Go to: Admin --> Nodes
Click on the node
Click on: Allocation
For Assign New Allocations insert the following then click on Submit:
IP Address: 0.0.0.0
IP Alias: Minecraft Servers
ports: 25565-25599
Go to: Admin > Servers
Click on: Create new
Insert the following then click on Create Server:
Core Details
Server Name: Minecraft Vanilla
Server Owner: admin email you are logged in with
Server Description: A Minecraft Vanilla Server
Resource Management
Memory: 2048
Disk Space: 2048
cd /home/myusername/docker
mkdir zitadel && cd "$_"
version: "3.8"
services:
zitadel:
image: ghcr.io/zitadel/zitadel:latest
container_name: zitadel
restart: unless-stopped
command: 'start-from-init --masterkey "MasterkeyNeedsToHave32Characters" --tlsMode disabled'
environment:
ZITADEL_DATABASE_COCKROACH_HOST: crdb
ZITADEL_EXTERNALSECURE: false
depends_on:
crdb:
condition: 'service_healthy'
ports:
- 9165:8080
networks:
- zitadel
crdb:
image: cockroachdb/cockroach:latest
container_name: zitadel-cockroachdb
restart: unless-stopped
command: 'start-single-node --insecure'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
interval: '10s'
timeout: '30s'
retries: 5
start_period: '20s'
ports:
- 9311:8080
- 26257:26257
networks:
- zitadel
networks:
zitadel:
Default login at http://IMPORT_ZITADEL_URL_HERE:9165/ui/console:
Username:[email protected]
Password:Password1!
cd /home/myusername/docker
mkdir -p authelia/{config,redis}
version: "3.8"
services:
authelia:
image: authelia/authelia
container_name: authelia
restart: unless-stopped
healthcheck:
disable: true
environment:
TZ: Etc/UTC
volumes:
- ./config:/config
ports:
- 6190:9091
depends_on:
- redis
redis:
image: redis:alpine
container_name: authelia-redis
restart: unless-stopped
environment:
TZ: Etc/UTC
volumes:
- ./redis:/data
ports:
- 6379:6379
cd config
users:
john:
displayname: "John Doe"
password: "$argon2id$v=19$m=65536,t=3,p=2$BpLnfgDsc2WD8F2q$o/vzA4myCqZZ36bUGsDY//8mKUYNZZaR0t4MFFSs+iM"
email: [email protected]
groups:
- admins
- dev
harry:
displayname: "Harry Potter"
password: "$argon2id$v=19$m=65536,t=3,p=2$BpLnfgDsc2WD8F2q$o/vzA4myCqZZ36bUGsDY//8mKUYNZZaR0t4MFFSs+iM"
email: [email protected]
groups: []
##############################################################
# Authelia configuration #
###############################################################
host: 0.0.0.0
port: 9091 # change this if you changed it in the docker-compose file
log_level: info
jwt_secret: some-other-long-string-of-letters-and-numbers-in-mixed-case
default_redirection_url: https://auth.example.com
totp:
issuer: example.com
period: 30
skew: 1
#duo_api: ## You can use this api if you want push notifications of auth attempts
# hostname: api-123456789.example.com
# integration_key: ABCDEF
# secret_key: yet-another-long-string-of-characters-and-numbers-and-symbols
authentication_backend:
disable_reset_password: false
file:
path: /config/users_database.yml # create this file !IMPORTANT!
password:
algorithm: argon2id
iterations: 1
salt_length: 16
parallelism: 8
memory: 64
access_control:
  default_policy: deny # NOTE: all domains added in NPM rules will be denied unless added below
  rules:
    # Rules applied to everyone
    - domain:
        - "movies.example.com"
        - "auth.example.com"
      policy: bypass
    - domain:
        - "dashboard.example.com"
        - "search.example.com"
        - "example.com"
      policy: one_factor
      # networks:
      #   - 192.168.1.0/24
    - domain:
        - "ntop.example.com"
      policy: two_factor
      # Both lines commented out, matching the rule above: leaving the list
      # item uncommented while the "networks:" key is commented makes the
      # file invalid YAML.
      # networks:
      #   - 192.168.1.0/24
session:
  name: authelia_session
  # This secret can also be set using the env variable AUTHELIA_SESSION_SECRET_FILE
  secret: <some-long-mix-set-of-numbers-and-letters-upper-and-lower-case>
  expiration: 3600 # 1 hour
  inactivity: 7200 # 2 hours
  domain: your-domain.org # Should match whatever your root protected domain is
  redis:
    # Must match the redis container_name from the docker-compose file above,
    # which is "authelia-redis". ("authelia_redis_1" is the old Compose-v1
    # default name and never exists when container_name is set.)
    host: authelia-redis
    port: 6379
    # This secret can also be set using the env variable AUTHELIA_SESSION_REDIS_PASSWORD_FILE
    # password: authelia
regulation:
max_retries: 3
find_time: 2m
ban_time: 10m
theme: dark # options: dark, light
storage:
local:
path: /config/db.sqlite3
notifier:
# filesystem:
# filename: /config/notification.txt
smtp:
username: [email protected]
password: REPLACE_WITH_YOUR_EMAIL_PASSWORD
host: mail.DOMAIN.COM
port: 587 # 25 non-ssl, 443 ssl, 587 tls
sender: [email protected]
subject: "[Authelia] {title}"
disable_require_tls: false # set to true if your domain uses no tls or ssl only
disable_html_emails: false # set to true if you don't want html in your emails
tls:
server_name: <your-email-host-url-or-ip>
skip_verify: false
minimum_version: TLS1.2
cd /home/myusername/docker
mkdir -p nginx/{www,config} && cd nginx/www
mkdir website01
docker run --name tmp-nginx-container -d nginx
docker cp tmp-nginx-container:/etc/nginx/nginx.conf /home/myusername/docker/nginx/config/nginx.conf
docker rm -f tmp-nginx-container
version: "3.8"
services:
nginx_website01:
image: nginx
container_name: nginx_website01
restart: unless-stopped
volumes:
- ./www/website01:/usr/share/nginx/html
- ./config/nginx.conf:/etc/nginx/nginx.conf:ro
ports:
- 8171:80
docker compose up -d
cd /home/myusername/docker
mkdir wordpress && cd "$_"
version: "3.8"
services:
wordpress:
image: wordpress:latest
container_name: wordpress01
restart: unless-stopped
environment:
WORDPRESS_DB_NAME: wpdocker
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_PASSWORD: wordpress!
WORDPRESS_DB_HOST: db
volumes:
- ./www/YOUREWEBSITENAME:/var/www/html
ports:
- 8170:80
depends_on:
- db
db:
image: mariadb:latest
container_name: wordpress01_mariadb
restart: unless-stopped
environment:
MYSQL_DATABASE: wpdocker
MYSQL_USER: wordpress
MYSQL_PASSWORD: wordpress!
MYSQL_ROOT_PASSWORD: wordpress!!
volumes:
- ./www/db:/var/lib/mysql
docker compose up -d
cd /home/myusername/docker/wordpress/www/YOURDOMAIN
nano .htaccess
php_value upload_max_filesize 64M
php_value post_max_size 64M
php_value max_execution_time 300
php_value max_input_time 300
cd /home/myusername/docker/wordpress/www/YOURDOMAIN/wp-content/themes/YOURACTIVETHEME
nano functions.php
add_filter( 'rest_authentication_errors', function( $result ) {
// If a previous authentication check was applied,
// pass that result along without modification.
if ( true === $result || is_wp_error( $result ) ) {
return $result;
}
// No authentication has been performed yet.
// Return an error if user is not logged in.
if ( ! is_user_logged_in() ) {
return new WP_Error(
'rest_not_logged_in',
__( 'You are not currently logged in.' ),
array( 'status' => 401 )
);
}
// Our custom authentication check should have no effect
// on logged-in requests
return $result;
});
remove_action('wp_head', 'rsd_link');
remove_action('wp_head', 'wlwmanifest_link');
remove_action('wp_head', 'wp_generator');
remove_action('wp_head', 'start_post_rel_link');
remove_action('wp_head', 'index_rel_link');
remove_action('wp_head', 'adjacent_posts_rel_link');
Verify if the endpoint returns a "status":401 JSON response by calling it from your browser.
https://DOMAIN.COM/wp-json/wp/v2/users/
cd /home/myusername/docker
mkdir webtop && cd "$_"
version: "3.8"
services:
webtop:
image: lscr.io/linuxserver/webtop
container_name: webtop
restart: unless-stopped
privileged: true
shm_size: 1gb
devices:
- /dev/dri:/dev/dri
security_opt:
- seccomp=unconfined
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
SUBFOLDER: /
KEYBOARD: en-us-qwerty
TITLE: Webtop
volumes:
- ./config:/config
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8211:3000
docker compose up -d
version: "3.8"
services:
webtop:
image: lscr.io/linuxserver/webtop:ubuntu-kde
container_name: webtop-ubuntu-kde
restart: unless-stopped
privileged: true
shm_size: 1gb
devices:
- /dev/dri:/dev/dri
security_opt:
- seccomp=unconfined
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
SUBFOLDER: /
KEYBOARD: en-us-qwerty
TITLE: Webtop-Ubuntu-KDE
volumes:
- ./config:/config
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8211:3000
docker compose up -d
Default login
Username:abc
Password:abc
cd /home/myusername/docker
mkdir mango && cd "$_"
version: "3.8"
services:
mango:
image: hkalexling/mango
container_name: mango
restart: unless-stopped
volumes:
- ./data:/root/mango
- ./config:/root/.config/mango
ports:
- 8135:9000
docker compose up -d
cd /home/myusername/docker
mkdir filebrowser && cd "$_"
mkdir config database srv
# Create the files exactly where the compose file below mounts them:
# the database lives in ./database, the config file in ./config
# (the original steps created filebrowser.db in config/ and a stray
# settings.json in database/, which the compose file never uses).
touch database/filebrowser.db
touch config/filebrowser.json
version: "3.8"
services:
filebrowser:
image: filebrowser/filebrowser:latest
container_name: filebrowser
restart: unless-stopped
environment:
PUID: 1000
PGID: 1000
volumes:
- ./srv:/srv
- ./database/filebrowser.db:/database.db
- ./config/filebrowser.json:/filebrowser.json
ports:
- 8131:80
docker compose up -d
cd /home/myusername/docker
mkdir peppermint && cd "$_"
version: "3.8"
services:
client:
image: pepperlabs/peppermint
container_name: peppermint
restart: unless-stopped
environment:
PORT: 5000
DB_USERNAME: peppermint
DB_PASSWORD: peppermint!
DB_HOST: postgres
BASE_URL: http://192.168.1.65:5000
ports:
- 8220:5000
depends_on:
- db
db:
image: postgres:latest
container_name: peppermint-postgres
restart: unless-stopped
environment:
POSTGRES_USER: peppermint
POSTGRES_PASSWORD: peppermint!
POSTGRES_DB: peppermint
volumes:
- ./db:/data/db
docker compose up -d
Default login:
Email:[email protected]
Password:1234
cd /home/myusername/docker
mkdir uvdesk && cd "$_"
version: "3.8"
services:
uvdesk:
image: nuttcorp/uvdesk:latest
container_name: uvdesk
restart: unless-stopped
tty: true
environment:
MYSQL_DATABASE: uvdesk
MYSQL_USER: uvdesk
MYSQL_PASSWORD: uvdesk!
MYSQL_ROOT_PASSWORD: uvdesk!!
ports:
- 8221:80
depends_on:
- db
db:
image: "mysql:5.7"
container_name: uvdesk-mysql
restart: unless-stopped
environment:
MYSQL_DATABASE: uvdesk
MYSQL_USER: uvdesk
MYSQL_PASSWORD: uvdesk!
MYSQL_ROOT_PASSWORD: uvdesk!!
volumes:
- ./db:/var/lib/mysql
docker compose up -d
Database Configuration
Server: uvdesk-mysql (the db container_name from the compose file above)
Port: 3306
Username: root
Password: uvdesk!! (the MYSQL_ROOT_PASSWORD from the compose file above)
Database: uvdesk
Create Super Admin Account
Name: superroot
Email: [email protected]
Password: uvdesk!
Confirm Password: uvdesk!
Website Configuration
Member Panel Prefix: agent
Customer Panel Prefix: customer
cd /home/myusername/docker
mkdir glpi && cd "$_"
version: "3.8"
services:
db:
image: mariadb:10.7
container_name: glpi-mariadb
restart: unless-stopped
environment:
MARIADB_DATABASE: glpi
MARIADB_USER: glpi
MARIADB_PASSWORD: glpi!
MARIADB_ROOT_PASSWORD: glpi!!
volumes:
- ./db:/var/lib/mysql
glpi:
image: diouxx/glpi
container_name: glpi
restart: unless-stopped
environment:
TIMEZONE: Etc/UTC
volumes:
- ./data:/var/www/html/glpi
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 8222:80
docker compose up -d
Database connection parameters
SQL Server (MariaDB or MySQL): glpi-mariadb (the db container_name from the compose file above)
SQL User: glpi (the MARIADB_USER value)
SQL Password: glpi! (the MARIADB_PASSWORD value)
Default logins
Administrator:glpi/glpi
Technician:tech/tech
Normal:normal/normal
Postonly:post-only/postonly
cd /home/myusername/docker
mkdir matomo && cd "$_"
MYSQL_PASSWORD=Welkom123!
MYSQL_DATABASE=matomo
MYSQL_USER=matomo
MATOMO_DATABASE_ADAPTER=mysql
MATOMO_DATABASE_TABLES_PREFIX=matomo_
MATOMO_DATABASE_USERNAME=matomo
MATOMO_DATABASE_PASSWORD=Welkom123!
MATOMO_DATABASE_DBNAME=matomo
MARIADB_AUTO_UPGRADE=1
MARIADB_INITDB_SKIP_TZINFO=1
upstream php-handler {
server app:9000;
}
server {
listen 80;
add_header Referrer-Policy origin; # make sure outgoing links don't show the URL to the Matomo instance
root /var/www/html; # replace with path to your matomo instance
index index.php;
try_files $uri $uri/ =404;
## only allow accessing the following php files
location ~ ^/(index|matomo|piwik|js/index|plugins/HeatmapSessionRecording/configs).php {
# regex to split $uri to $fastcgi_script_name and $fastcgi_path
fastcgi_split_path_info ^(.+\.php)(/.+)$;
# Check that the PHP script exists before passing it
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param HTTP_PROXY ""; # prohibit httpoxy: https://httpoxy.org/
fastcgi_pass php-handler;
}
## deny access to all other .php files
location ~* ^.+\.php$ {
deny all;
return 403;
}
## disable all access to the following directories
location ~ /(config|tmp|core|lang) {
deny all;
return 403; # replace with 404 to not show these directories exist
}
location ~ /\.ht {
deny all;
return 403;
}
location ~ js/container_.*_preview\.js$ {
expires off;
add_header Cache-Control 'private, no-cache, no-store';
}
location ~ \.(gif|ico|jpg|png|svg|js|css|htm|html|mp3|mp4|wav|ogg|avi|ttf|eot|woff|woff2|json)$ {
allow all;
## Cache images,CSS,JS and webfonts for an hour
## Increasing the duration may improve the load-time, but may cause old files to show after an Matomo upgrade
expires 1h;
add_header Pragma public;
add_header Cache-Control "public";
}
location ~ /(libs|vendor|plugins|misc/user) {
deny all;
return 403;
}
## properly display textfiles in root directory
location ~/(.*\.md|LEGALNOTICE|LICENSE) {
default_type text/plain;
}
}
# vim: filetype=nginx
version: "3.8"
services:
app:
image: matomo:fpm-alpine
container_name: matomo-app
restart: unless-stopped
environment:
MATOMO_DATABASE_HOST: db
PHP_MEMORY_LIMIT: 2048M
env_file:
- ./db.env
volumes:
# - ./config:/var/www/html/config:rw
# - ./logs:/var/www/html/logs
- ./data:/var/www/html
depends_on:
- db
db:
image: mariadb:latest
container_name: matomo-mariadb
restart: unless-stopped
command: --max-allowed-packet=64MB
environment:
MYSQL_ROOT_PASSWORD: REPLACE_WITH_MYSQL_PASSWORD_FROM_ENV
env_file:
- ./db.env
volumes:
- ./db:/var/lib/mysql
web:
image: nginx:alpine
container_name: matomo-web
restart: unless-stopped
volumes:
- ./data:/var/www/html:ro
# see https://github.com/matomo-org/matomo-nginx
- ./matomo.conf:/etc/nginx/conf.d/default.conf:ro
ports:
- 8165:80
docker compose up -d
cd /home/myusername/docker
mkdir fail2ban && cd "$_"
version: "3.8"
services:
  fail2ban:
    image: crazymax/fail2ban:latest
    container_name: fail2ban
    restart: unless-stopped
    # Host networking + NET_ADMIN/NET_RAW are required so fail2ban can
    # manipulate the host firewall.
    network_mode: host
    cap_add:
      - NET_ADMIN
      - NET_RAW
    environment:
      TZ: Etc/UTC
      F2B_LOG_TARGET: STDOUT
      F2B_LOG_LEVEL: INFO
      F2B_DB_PURGE_AGE: 365d
      SSMTP_HOST: mail.DOMAIN.COM
      SSMTP_PORT: 587
      SSMTP_HOSTNAME: DOMAIN.COM
      SSMTP_USER: [email protected]
      SSMTP_PASSWORD: REPLACE_WITH_YOUR_EMAIL_PASSWORD
      # Quoted: unquoted YES is parsed as the YAML 1.1 boolean true, so the
      # container would receive "true" instead of the literal "YES".
      SSMTP_TLS: "YES"
    volumes:
      - /home/myusername/docker/fail2ban/data:/data
      - /home/myusername/docker/nginx-proxy-manager/data/logs/:/log/npm/:ro
      - /var/log/auth.log:/var/log/auth.log:ro
docker compose up -d
cd /home/myusername/docker
mkdir crowdsec
cd crowdsec
mkdir config
version: "3.8"
services:
crowdsec:
image: crowdsecurity/crowdsec:latest
container_name: crowdsec
restart: unless-stopped
environment:
GID: "${GID-1000}"
COLLECTIONS: "crowdsecurity/linux crowdsecurity/traefik"
# depends_on: #uncomment if running traefik in the same compose file
# - 'traefik'
volumes:
- ./config/acquis.yaml:/etc/crowdsec/acquis.yaml
- crowdsec-db:/var/lib/crowdsec/data/
- crowdsec-config:/etc/crowdsec/
- traefik_traefik-logs:/var/log/traefik/:ro
networks:
- proxy
bouncer-traefik:
image: docker.io/fbonalair/traefik-crowdsec-bouncer:latest
container_name: bouncer-traefik
restart: unless-stopped
environment:
CROWDSEC_BOUNCER_API_KEY: komtlater
CROWDSEC_AGENT_HOST: crowdsec:8080
networks:
- proxy # same network as traefik + crowdsec
depends_on:
- crowdsec
networks:
proxy:
external: true
volumes:
crowdsec-db:
crowdsec-config:
traefik_traefik-logs: # this will be the name of the volume holding the traefik logs
external: true # remove if traefik is running on same stack
cd config
filenames:
- /var/log/traefik/*
labels:
type: traefik
cd traefik
cd data
api:
dashboard: true
debug: true
entryPoints:
http:
address: ":80"
http:
middlewares:
- crowdsec-bouncer@file
https:
address: ":443"
http:
middlewares:
- crowdsec-bouncer@file
serversTransport:
insecureSkipVerify: true
providers:
docker:
endpoint: "unix:///var/run/docker.sock"
exposedByDefault: false
file:
filename: /config.yml
certificatesResolvers:
cloudflare:
acme:
email: [email protected]
storage: acme.json
dnsChallenge:
provider: cloudflare
resolvers:
- "1.1.1.1:53"
log:
level: "INFO"
filePath: "/var/log/traefik/traefik.log"
accessLog:
filePath: "/var/log/traefik/access.log"
cd traefik
version: "3.8"
services:
traefik:
image: traefik:latest
container_name: traefik
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
- proxy
ports:
- 80:80
- 443:443
environment:
CF_API_EMAIL: [email protected]
CF_DNS_API_TOKEN: YOUR_API_TOKEN
# - CF_API_KEY=YOUR_API_KEY
# be sure to use the correct one depending on if you are using a token or key
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./data/traefik.yml:/traefik.yml:ro
- ./data/acme.json:/acme.json
- ./data/config.yml:/config.yml:ro
- traefik-logs:/var/log/traefik
labels:
- "traefik.enable=true"
- "traefik.http.routers.traefik.entrypoints=http"
- "traefik.http.routers.traefik.rule=Host(`traefik-dashboard.local.example.com`)"
- "traefik.http.middlewares.traefik-auth.basicauth.users=USER:BASIC_AUTH_PASSWORD"
- "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https"
- "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https"
- "traefik.http.routers.traefik.middlewares=traefik-https-redirect"
- "traefik.http.routers.traefik-secure.entrypoints=https"
- "traefik.http.routers.traefik-secure.rule=Host(`traefik-dashboard.local.example.com`)"
- "traefik.http.routers.traefik-secure.middlewares=traefik-auth"
- "traefik.http.routers.traefik-secure.tls=true"
- "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare"
- "traefik.http.routers.traefik-secure.tls.domains[0].main=local.example.com"
- "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.local.example.com"
- "traefik.http.routers.traefik-secure.service=api@internal"
networks:
proxy:
external: true
volumes:
traefik-logs:
docker compose up -d
cd crowdsec
docker compose up -d
docker exec crowdsec cscli bouncers add bouncer-traefik
INSERT THE API KEY IN DOCKER-COMPOSE.YML OF CROWDSEC
CROWDSEC_BOUNCER_API_KEY: place_the_api_key_here
docker compose up -d
cd traefik
cd data
ADD THE FOLLOWING TO THE MIDDLEWARE CATEGORY:
crowdsec-bouncer:
forwardauth:
address: http://bouncer-traefik:8080/api/v1/forwardAuth
trustForwardHeader: true
entryPoints:
http:
address: ":80"
http:
middlewares:
- crowdsec-bouncer@file
https:
address: ":443"
http:
middlewares:
- crowdsec-bouncer@file
docker compose up -d
cd /home/myusername/docker
mkdir wikijs && cd "$_"
version: "3.8"
services:
wiki:
image: ghcr.io/requarks/wiki:2
container_name: wikijs
restart: unless-stopped
environment:
DB_TYPE: postgres
DB_HOST: db
DB_PORT: 5432
DB_USER: wikijs
DB_PASS: wikijs!
DB_NAME: wiki
volumes:
- ./data/content:/wiki/data/content
- ./config:/config
ports:
- 8141:3000
depends_on:
- db
db:
image: postgres:11-alpine
container_name: wikijs_postgres
restart: unless-stopped
environment:
POSTGRES_DB: wiki
POSTGRES_PASSWORD: wikijs!
POSTGRES_USER: wikijs
logging:
driver: none
volumes:
- ./db:/var/lib/postgresql/data
docker compose up -d
body {
background: #000
}
header {
border-bottom: 1px solid #ccc
}
.v-navigation-drawer__content {
--bkg: url("https://wallpaperaccess.com/full/797185.png");
/*--bkg: url("https://mir-s3-cdn-cf.behance.net/project_modules/disp/451206106881743.5f9a1f8faa991.gif");*/
--bkg-color: #171717;
background: var(--bkg-color)!important;
background-image: var(--bkg)!important;
background-blend-mode: multiply;
background-size: cover!important;
background-attachment: fixed!important;
background-repeat: no-repeat!important;
background-position: center center!important;
}
.v-navigation-drawer__content::before {
content: "";
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
backdrop-filter: blur(5px);
}
#root .v-navigation-drawer__content div.v-list {
background: 0 0!important;
background-color: transparent!important
}
#root .v-application .grey.darken-4-d4,
#root .v-application .grey.darken-5 {
background: 0 0!important;
background-color: transparent!important
}
cd /home/myusername/docker
mkdir openproject && cd "$_"
version: "3.8"
services:
openproject:
image: openproject/community:11
container_name: openproject
restart: unless-stopped
environment:
PUID: 998
PGID: 100
SECRET_KEY_BASE: koZirTof1faEzGv7vGyKugOq6RnpislI
volumes:
- ./config:/var/openproject/pgdata
- ./assets:/var/openproject/assets
ports:
- 8204:80
docker compose up -d
cd /home/myusername/docker
mkdir linkwarden && cd "$_"
To generate a NEXTAUTH_SECRET use the following command:
openssl rand -hex 32
version: "3.8"
services:
linkwarden:
image: ghcr.io/linkwarden/linkwarden:latest
container_name: linkwarden
restart: unless-stopped
environment:
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres
NEXTAUTH_SECRET: 04528de216fe029b3513e0a52ffb256dd671235942200348040a73acdf29a3c0
env_file: .env
volumes:
- ./data:/data/data
ports:
- 8136:3000
depends_on:
- postgres
postgres:
image: postgres:16-alpine
container_name: linkwarden_postgresql
restart: unless-stopped
env_file: .env
volumes:
- ./db:/var/lib/postgresql/data
nano .env
# Manual installation database settings
DATABASE_URL=postgresql://user:password@localhost:5432/linkwarden
# Docker installation database settings
POSTGRES_PASSWORD=linkwarden
docker compose up -d
cd /home/myusername/docker
mkdir linkstack && cd "$_"
version: "3.8"
services:
linkstack:
image: linkstackorg/linkstack:latest
container_name: linkstack
restart: unless-stopped
hostname: linkstack
environment:
TZ: Etc/UTC
SERVER_ADMIN: [email protected]
HTTP_SERVER_NAME: linkstack.DOMAIN.COM
HTTPS_SERVER_NAME: linkstack.DOMAIN.COM
LOG_LEVEL: info
PHP_MEMORY_LIMIT: 256M
UPLOAD_MAX_FILESIZE: 8M
LINKSTACK_DB_HOST: mariadb
LINKSTACK_DB_NAME: linkstack
LINKSTACK_DB_USER: linkstack
LINKSTACK_DB_PASSWORD: linkstack!
# volumes:
# - ./data:/htdocs
ports:
- 8229:80
mariadb:
image: mariadb:latest
container_name: linkstack-mariadb
restart: unless-stopped
environment:
MYSQL_DATABASE: linkstack
MYSQL_USER: linkstack
MYSQL_PASSWORD: linkstack!
MYSQL_ROOT_PASSWORD: linkstack!!
volumes:
- ./db:/var/lib/mysql
docker compose up -d
The default database configuration from docker compose file:
Database type: MySQL
Database host: mariadb
Database port: 3306
Database name: linkstack
Database username: linkstack
Database password: linkstack!
cd /home/myusername/docker
mkdir dolibarr && cd "$_"
version: "3.8"
services:
web:
image: dolibarr/dolibarr:latest
container_name: dolibarr
restart: unless-stopped
environment:
DOLI_DB_HOST: db
DOLI_DB_NAME: dolibarr
DOLI_DB_USER: dolibarr
DOLI_DB_PASSWORD: dolibarr!
DOLI_ADMIN_LOGIN: admin
DOLI_ADMIN_PASSWORD: dolibarr
DOLI_URL_ROOT: http://localhost
PHP_INI_DATE_TIMEZONE: Etc/UTC
volumes:
- ./documents:/var/www/documents
- ./html/custom:/var/www/html/custom
ports:
- 8200:80
depends_on:
- db
db:
image: mariadb:latest
container_name: dolibarr-mariadb
restart: unless-stopped
environment:
MYSQL_DATABASE: dolibarr
MYSQL_USER: dolibarr
MYSQL_PASSWORD: dolibarr!
MYSQL_ROOT_PASSWORD: dolibarr!!
volumes:
- ./db:/var/lib/mysql
docker compose up -d
Default login:
Username:admin
Password:dolibarr
cd /home/myusername/docker
mkdir drawio && cd "$_"
version: "3.8"
services:
  drawio:
    image: jgraph/drawio
    container_name: drawio
    restart: unless-stopped
    healthcheck:
      # The healthcheck runs INSIDE the container, so probe the container's
      # own port (8080) via localhost -- not a hard-coded LAN IP and the
      # published host port, which would break on any other machine.
      test: ["CMD-SHELL", "curl -f http://localhost:8080 || exit 1"]
      interval: 1m30s
      timeout: 10s
      retries: 5
      start_period: 10s
    ports:
      - "8201:8080"
      - "8202:8443"
docker compose up -d
cd /home/myusername/docker
mkdir humhub && cd "$_"
mkdir config && mkdir uploads && mkdir modules
cd uploads
mkdir profile_image && cd ..
version: "3.8"
services:
humhub:
image: mriedmann/humhub:latest
container_name: humhub
restart: unless-stopped
environment:
HUMHUB_DB_USER: humhub
HUMHUB_DB_PASSWORD: humhub!
volumes:
- ./config:/var/www/localhost/htdocs/protected/config
- ./uploads:/var/www/localhost/htdocs/uploads
- ./modules:/var/www/localhost/htdocs/protected/modules
- ./themes:/var/www/localhost/htdocs/themes
ports:
- 8203:80
depends_on:
- db
db:
image: mariadb:latest
container_name: humhub-mariadb
restart: unless-stopped
environment:
MYSQL_DATABASE: humhub
MYSQL_USER: humhub
MYSQL_PASSWORD: humhub!
MYSQL_ROOT_PASSWORD: humhub!!
volumes:
- ./db:/var/lib/mysql
docker compose up -d
Hostname: humhub-mariadb
Port: 3306 (MariaDB default; both containers share the compose network)
Username: humhub
Password: humhub!
Name of Database: humhub
✔️ Create the database if it doesn't exist yet.
✔️ External users can register (show registration form on login)
✔️ Newly registered users have to be activated by an admin first
Allow access for non-registered users to public content (guest access)
✔️ Registered members can invite new users via email
✔️ Allow friendships between members
# Absolute path: the original "cd home/myusername/docker" was missing the
# leading slash and only works from /.
cd /home/myusername/docker
mkdir guacamole && cd "$_"
version: "3.8"
services:
  guacamole:
    # NOTE(review): abesnier/guacamole is no longer updated -- consider
    # switching to jwetzell/guacamole.
    image: abesnier/guacamole
    container_name: guacamole
    restart: unless-stopped
    volumes:
      - ./postgres:/config
    ports:
      - "8210:8080"
docker compose up -d
Default login:
Username:guacadmin
Password:guacadmin
Add new user:
Guacadmin --> Settings --> Users --> New User
Delete user Guacadmin:
myusername --> Settings --> Users --> guacadmin --> Delete --> Delete
Create a connection
Connections --> New Connection
EDIT CONNECTION
Name: Win11test
Location: ROOT
Protocol: RDP
PARAMETERS
Hostname: IPV4 OF THE MACHINE!!!
Port: 3389
Authentication
Ignore server certificate: ENABLE
Currently not recommended use remotely instead
cd /home/myusername/docker
mkdir rustdesk && cd "$_"
version: "3.8"
services:
hbbs:
image: rustdesk/rustdesk-server:latest
container_name: rustdesk-hbbs
restart: unless-stopped
command: hbbs -r rustdesk.DOMAIN.COM:21117 # Change the URL to your domain
volumes:
- ./hbbs/data:/root
ports:
- 21115:21115
- 21116:21116
- 21116:21116/udp
- 21118:21118
depends_on:
- hbbr
hbbr:
image: rustdesk/rustdesk-server:latest
container_name: rustdesk-hbbr
restart: unless-stopped
command: hbbr
volumes:
- ./hbbr/data:/root
ports:
- 21117:21117
- 21119:21119
docker compose up -d
cd /home/myusername/docker
mkdir remotely && cd "$_"
version: "3.8"
services:
remotely:
image: immybot/remotely:latest
container_name: remotely
restart: unless-stopped
ports:
- 8215:5000
environment:
ASPNETCORE_ENVIRONMENT: Production
ASPNETCORE_HTTP_PORTS: 5000
# Other ASP.NET Core configurations can be overridden here, such as Logging.
# See https://learn.microsoft.com/en-us/aspnet/core/fundamentals/configuration/?view=aspnetcore-8.0
# Values for DbProvider are SQLite, SQLServer, and PostgreSQL.
Remotely_ApplicationOptions__DbProvider: SQLite
# This path shouldn't be changed. It points to the Docker volume.
Remotely_ConnectionStrings__SQLite: Data Source=/app/AppData/Remotely.db
# If using SQL Server, change the connection string to point to your SQL Server instance.
#Remotely_ConnectionStrings__SQLServer: Server=(localdb)\\mssqllocaldb;Database=Remotely-Server-53bc9b9d-9d6a-45d4-8429-2a2761773502;Trusted_Connection=True;MultipleActiveResultSets=true
# If using PostgreSQL, change the connection string to point to your PostgreSQL instance.
#Remotely_ConnectionStrings__PostgreSQL: Server=Host=localhost;Database=Remotely;Username=postgres;
volumes:
- ./data:/remotely-data
- ./db:/app/AppData
# - ./wwwroot:/app/wwwroot
docker compose up -d
cd /home/myusername/docker
mkdir pwndrop
cd pwndrop
version: "3.8"
services:
pwndrop:
image: lscr.io/linuxserver/pwndrop:latest
container_name: pwndrop
restart: unless-stopped
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
SECRET_PATH: /pwndrop # optional
volumes:
- ./config:/config
ports:
- 8133:8080
docker compose up -d
cd /home/myusername/docker
mkdir netbox && cd "$_"
git clone -b release https://github.com/netbox-community/netbox-docker.git && cd netbox-docker
cp docker-compose.override.yml.example docker-compose.override.yml
version: "3.8"
services:
netbox:
ports:
- 9140:8080
nano netbox.env
nano ./env/netbox.env
CORS_ORIGIN_ALLOW_ALL=True
DB_HOST=postgres
DB_NAME=netbox
DB_PASSWORD=netbox!
DB_USER=netbox
[email protected]
EMAIL_PASSWORD=
EMAIL_PORT=587
EMAIL_SERVER=mail.domainhere.com
EMAIL_SSL_CERTFILE=
EMAIL_SSL_KEYFILE=
EMAIL_TIMEOUT=5
[email protected]
# EMAIL_USE_SSL and EMAIL_USE_TLS are mutually exclusive, i.e. they can't both be `true`!
EMAIL_USE_SSL=false
EMAIL_USE_TLS=true
GRAPHQL_ENABLED=true
HOUSEKEEPING_INTERVAL=86400
MAX_PAGE_SIZE=1000
MEDIA_ROOT=/opt/netbox/netbox/media
METRICS_ENABLED=false
REDIS_CACHE_DATABASE=1
REDIS_CACHE_HOST=redis-cache
REDIS_CACHE_INSECURE_SKIP_TLS_VERIFY=false
REDIS_CACHE_PASSWORD=netbox!
REDIS_CACHE_SSL=false
REDIS_DATABASE=0
REDIS_HOST=redis
REDIS_INSECURE_SKIP_TLS_VERIFY=false
REDIS_PASSWORD=netbox!
REDIS_SSL=false
RELEASE_CHECK_URL=https://api.github.com/repos/netbox-community/netbox/releases
SECRET_KEY=r8OwDznj!!dci#P9ghmRfdu1Ysxm0AiPeDCQhKE+N_rClfWNj
SKIP_STARTUP_SCRIPTS=false
SKIP_SUPERUSER=false
SUPERUSER_API_TOKEN=6e04a389-a42d-4ca3-a4a3-9bf420d393fd
[email protected]
SUPERUSER_NAME=admin
SUPERUSER_PASSWORD=netbox
WEBHOOKS_ENABLED=true
docker compose up -d
cd /home/myusername/docker
mkdir netbox && cd "$_"
version: "3.8"
services:
netbox:
image: lscr.io/linuxserver/netbox:latest
container_name: netbox
restart: unless-stopped
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
SKIP_SUPERUSER: false
SUPERUSER_NAME: admin
SUPERUSER_EMAIL: [email protected]
SUPERUSER_PASSWORD: netbox
ALLOWED_HOST: '*' # to allow only sertain hosts use this ALLOWED_HOST: 'netbox.DOMAIN.COM'
DB_NAME: netbox
DB_USER: netbox
DB_PASSWORD: netbox!
DB_HOST: db
DB_PORT: 5432
REDIS_HOST: redis
REDIS_PORT: 6379
REDIS_PASSWORD: netbox!
REDIS_DB_TASK: 0
REDIS_DB_CACHE: 1
BASE_PATH: #optional
REMOTE_AUTH_ENABLED: #optional
REMOTE_AUTH_BACKEND: #optional
REMOTE_AUTH_HEADER: #optional
REMOTE_AUTH_AUTO_CREATE_USER: #optional
REMOTE_AUTH_DEFAULT_GROUPS: #optional
REMOTE_AUTH_DEFAULT_PERMISSIONS: #optional
WEBHOOKS_ENABLED: true
volumes:
- ./config:/config
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 9140:8000
db:
image: postgres:latest
container_name: netbox-postgres
restart: unless-stopped
environment:
POSTGRES_DB: netbox # Set the same value as DB_NAME
POSTGRES_USER: netbox # Set the same value as DB_USER
POSTGRES_PASSWORD: netbox! # Set the same value as DB_PASSWORD
volumes:
- ./db:/var/lib/postgresql/data
redis:
image: redis:latest
container_name: netbox-redis
restart: unless-stopped
command: redis-server --requirepass netbox! # Set your Redis password
volumes:
- ./redis:/data
docker compose up -d
cd /home/myusername/docker
mkdir ipboard && cd "$_"
Make sure you insert the Invision Community files in the ipboard folder that you just made
chmod 0777 ./data/conf_global.php
version: "3.8"
services:
app:
image: maxime1907/ipboard:latest # OPTIONAL, Use cmer81/ipboard:latest for php 8.1
container_name: ipboard
restart: unless-stopped
environment:
MYSQL_HOST: db
MYSQL_DATABASE: ipboard
MYSQL_USER: ipboard
MYSQL_PASSWORD: ipboard!
WEB_ALIAS_DOMAIN: forum.DOMAIN.COM
APPLICATION_UID: 1000
APPLICATION_GID: 1000
PGID: 1000
PUID: 1000
TZ: Etc/UTC
volumes:
- ./data:/app
ports:
- 8156:80
depends_on:
- db
db:
image: mariadb:latest
container_name: ipboard-mariadb
restart: unless-stopped
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW
environment:
MYSQL_DATABASE: ipboard
MYSQL_USER: ipboard
MYSQL_PASSWORD: ipboard!
MYSQL_ROOT_PASSWORD: ipboard!!
volumes:
- ./db:/var/lib/mysql
docker compose up -d
docker exec -it ipboard /bin/bash
chmod 0777 -R /app/applications /app/datastore /app/plugins /app/uploads /app/uploads/logs
If you use a cloudflared tunnel, make sure to enable "Trust IP addresses provided by proxies"; otherwise you will only see the same local IP address everywhere.
AdminCP -> System -> Advanced Configuration. --> Enable Trust IP addresses provided by proxies
Server Details:
Host: db
Username: ipboard
Password: ipboard!
Database Name: ipboard
After installation, If you see broken icons:
Upload Font Awesome 6 in IPS 6.3.0.tar to
System --> Applications --> manual upload
cd /home/myusername/docker
mkdir firefox && cd "$_"
version: "3.8"
services:
firefox:
image: lscr.io/linuxserver/firefox:latest
container_name: firefox
restart: unless-stopped
security_opt:
- seccomp=unconfined
environment:
PUID: 1000
PGID: 1000
TZ: Etc/UTC
volumes:
- ./config:/config
ports:
- 8300:3000
shm_size: 1gb
cd /home/myusername/docker
mkdir cloudflared && cd "$_"
version: "3.8"
networks:
# frontend:
# external: true
# backend:
# external: true
bridge:
driver: bridge
services:
cloudflaretunnel:
image: cloudflare/cloudflared:2023.6.1
container_name: cloudflaretunnel
restart: unless-stopped
command: tunnel --no-autoupdate run
environment:
TUNNEL_TOKEN: IMPORT_CLOUDFLARE_TUNNEL_TOKEN_HERE
networks:
# - frontend
# - backend
- bridge
docker compose up -d
cd /home/myusername/docker
mkdir teleport && cd "$_"
# Use an absolute host path for the bind mount: "docker run -v" rejects
# relative paths such as ./config (unlike docker compose).
docker run --hostname localhost --rm --entrypoint=/bin/sh -v "$(pwd)/config:/etc/teleport" -it quay.io/gravitational/teleport:11 -c "teleport configure > /etc/teleport/teleport.yml"
cd config
You don't have to enable ACME if you bought a domain name (and supply your own certificates).
#
# A Sample Teleport configuration file.
#
# Things to update:
# 1. license.pem: You only need a license from https://dashboard.goteleport.com
# if you are an Enterprise customer.
#
version: v3
teleport:
nodename: teleport.DOMAIN.COM
data_dir: /var/lib/teleport
log:
output: stderr
severity: INFO
format:
output: text
ca_pin: ""
diag_addr: ""
auth_service:
enabled: "yes"
listen_addr: 0.0.0.0:3025
proxy_listener_mode: multiplex
cluster_name: teleport.DOMAIN.COM
# ---
# (Optional) Passwordless Authentication
# authentication:
# type: local
# second_factor: on
# webauthn:
# rp_id: teleport.DOMAIN.COM
# connector_name: passwordless
# ---
ssh_service:
enabled: "yes"
commands:
- name: hostname
command: [hostname]
period: 1m0s
proxy_service:
enabled: "yes"
web_listen_addr: 0.0.0.0:443
public_addr: teleport.DOMAIN.COM
https_keypairs: []
acme: {}
# ---
# (Optional) ACME
# acme:
# enabled: "yes"
# email: your-email-address
# ---
version: "3.8"
services:
teleport:
image: quay.io/gravitational/teleport:11
container_name: teleport
restart: unless-stopped
user: 1000:1000
entrypoint: /bin/sh
command: -c "/usr/bin/dumb-init teleport start -d -c /etc/teleport/teleport.yml"
volumes:
- ./config:/etc/teleport
- ./data:/var/lib/teleport
ports:
- 9010:3023
- 9011:3024
- 9012:3025
- 9013:443
docker compose up -d
cd /home/myusername/docker
mkdir upsnap && cd "$_"
version: "3.8"
services:
  app:
    image: truecharts/upsnap:latest
    container_name: upsnap
    restart: unless-stopped
    # Host networking is required so UpSnap can send Wake-on-LAN packets.
    network_mode: host
    environment:
      FRONTEND_PORT: 8000
      BACKEND_PORT: 8001
      BACKEND_IS_PROXIED: "false" # set this to true, if you use a reverse proxy
      DB_TYPE: sqlite # required
      REDIS_HOST: 127.0.0.1 # required (make sure to use the same ip as below)
      # Because the app uses host networking, it reaches Redis through the
      # HOST side of the published mapping below ("9015:6379") -- not the
      # container port 6379.
      REDIS_PORT: 9015 # required (must match the host side of the redis port mapping below)
      # PING_INTERVAL: 5 # optional (default: 5 seconds)
      # DJANGO_SUPERUSER_USER: admin # optional (default: backend login disabled)
      # DJANGO_SUPERUSER_PASSWORD: admin # optional (default: backend login disabled)
      # DJANGO_SECRET_KEY: secret # optional (default: randomly generated)
      # DJANGO_DEBUG: True # optional (default: False)
      # DJANGO_LANGUAGE_CODE: de # optional (default: en)
      # DJANGO_TIME_ZONE: Etc/UTC # optional (default: UTC)
      # NMAP_ARGS: -sP # optional, set this if your devices need special nmap args so they can be found (default: -sP)
      # PAGE_TITLE: Custom Title # optional, set a custom page title (default: UpSnap)
    volumes:
      - ./db:/app/backend/db/
    depends_on:
      redis:
        condition: service_healthy
  redis:
    image: redis:alpine
    container_name: upsnap-redis
    restart: unless-stopped
    command: redis-server --loglevel warning
    healthcheck:
      test: redis-cli ping
      interval: 10s
    ports:
      - "9015:6379"
docker compose up -d
cd /home/myusername/docker
mkdir kasm && cd "$_"
# KASM Workspaces — docker-compose.yml
version: "3.8"
services:
  kasm:
    image: lscr.io/linuxserver/kasm:latest
    container_name: kasm
    restart: unless-stopped
    # Privileged is required by the linuxserver.io KASM image (docker-in-docker).
    privileged: true
    environment:
      KASM_PORT: 443
      TZ: Etc/UTC
      DOCKER_HUB_USERNAME: USER # optional
      DOCKER_HUB_PASSWORD: PASS # optional
    volumes:
      - ./data:/opt
      - ./profiles:/profiles # optional
      - /dev/input:/dev/input # optional
      - /run/udev/data:/run/udev/data # optional
    ports:
      - "3000:3000"
      - "443:443"
docker compose up -d
cd /home/myusername/docker
mkdir ispy && cd "$_"
# iSpy Agent DVR — docker-compose.yml
version: "3.8"
services:
  ispy:
    image: doitandbedone/ispyagentdvr
    container_name: ispy
    restart: unless-stopped
    environment:
      TZ: Etc/UTC
    volumes:
      # Recordings should live on a large storage drive.
      - /path/to/a/large/storage/drive/ispy/:/agent/Media/WebServerRoot/Media/
      - ./media:/agent/Media/XML/
      - ./commands:/agent/Commands/
    ports:
      - "8230:8090"
      - "3478:3478/udp"
      - "50000-50010:50000-50010/udp"
docker compose up -d
cd /home/myusername/docker
mkdir unifi-controller && cd "$_"
# UniFi Controller — docker-compose.yml
version: "3.8"
services:
  unifi-controller:
    image: lscr.io/linuxserver/unifi-controller:latest
    container_name: unifi-controller
    restart: unless-stopped
    environment:
      PUID: 1000
      PGID: 1000
      TZ: Etc/UTC
      MEM_LIMIT: 1024 # optional
      MEM_STARTUP: 1024 # optional
    volumes:
      - ./config:/config
    ports:
      - "8443:8443"
      - "3478:3478/udp"
      - "10001:10001/udp"
      - "8080:8080"
      - "1900:1900/udp" # optional
      - "8843:8843" # optional
      - "8880:8880" # optional
      - "6789:6789" # optional
      - "5514:5514/udp" # optional
docker compose up -d
cd /home/myusername/docker
mkdir unifi-protect && cd "$_"
x86 version — WE ARE TESTING THIS
# UniFi Protect (x86) — docker-compose.yml
version: "3.8"
services:
  unifi-protect-x86:
    image: markdegroot/unifi-protect-x86:latest
    container_name: unifi-protect-x86
    restart: unless-stopped
    tmpfs:
      - /srv/unifi-protect/temp
    ports:
      - "7080:7080"
      - "7443:7443"
      - "7444:7444"
      - "7447:7447"
      - "7550:7550"
      - "7442:7442"
    mem_limit: 2048m
    volumes:
      - ./db:/var/lib/postgresql/10/main
      - ./data:/srv/unifi-protect
ARM64 version WE ARE TESTING THIS
# UniFi Protect (ARM64) — docker-compose.yml
version: "3.8"
services:
  unifi-protect:
    image: markdegroot/unifi-protect-arm64
    container_name: unifi-protect
    environment:
      STORAGE_DISK: /dev/sda1
    command: ["sh", "-c", "systemd"]
    # Privileged + cgroup mount: the image boots systemd inside the container.
    privileged: true
    tmpfs:
      - /run
      - /run/lock
      - /tmp
    volumes:
      - ./cgroup:/sys/fs/cgroup:ro
      - ./srv:/srv
      - ./data:/data
      - ./persistent:/persistent
    network_mode: host
docker compose up -d
cd /home/myusername/docker
mkdir netdata && cd "$_"
# Netdata — docker-compose.yml
version: "3.8"
services:
  netdata:
    image: netdata/netdata
    container_name: netdata
    restart: unless-stopped
    # hostname: example.com # Optional set to fqdn of host
    cap_add:
      - SYS_PTRACE
    security_opt:
      - apparmor:unconfined
    volumes:
      # Read-only host mounts so netdata can monitor the host system.
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /etc/os-release:/host/etc/os-release:ro
      - /etc/passwd:/host/etc/passwd:ro
      - /etc/group:/host/etc/group:ro
      # - ./netdataconfig:/etc/netdata # Optional
      # - ./netdatalib:/var/lib/netdata # Optional
      # - ./netdatacache:/var/cache/netdata # Optional
    ports:
      - "8166:19999"
docker compose up -d
cd /home/myusername/docker
mkdir mydiscordbot01 && cd "$_"
# Discord bot — docker-compose.yml
version: "3.8"
services:
  mydiscordbot01:
    image: jonasbonno/discordbot:latest
    container_name: mydiscordbot01
    restart: unless-stopped
    environment:
      # Keep secrets out of version control — consider an env_file instead.
      TOKEN: IMPORT_DISCORD_BOT_TOKEN_HERE
    volumes:
      - ./data:/data
docker compose up -d
Recommended domain setup when using reverse proxies like traefik.
Server | Local | Domain |
---|---|---|
MorningStar WebSocket | ws://127.0.0.1:2096 | game.example.com |
Assets Server | http://127.0.0.1:8080 | assets.example.com |
CMS | http://127.0.0.1:8081 | example.com |
Nitro Client | http://127.0.0.1:3080 | game.example.com |
cd /home/myusername/docker
git clone https://github.com/Gurkengewuerz/nitro-docker.git && \
cd nitro-docker/ && \
git clone https://git.krews.org/morningstar/arcturus-morningstar-default-swf-pack.git assets/swf/ && \
git clone https://github.com/krewsarchive/default-assets.git assets/assets/ && \
wget https://github.com/billsonnn/nitro-react/files/10334858/room.nitro.zip && \
unzip -o room.nitro.zip -d assets/assets/bundled/generic && \
find . -type f -name 'example-*' | while read -r file; do new_file=$(echo "$file" | sed 's/example-//'); cp -rf "$file" "$new_file" && rm "$file"; done
Configure the .env to your needs
docker compose up db -d
or download full pack
arcturus-community-ms4-dev-sqlupdates.zip
Default login credentials using HeidiSQL:
Network type:MariaDB or MySQL (TCP/IP)
Library:libmariadb.dll
Hostname /IP:IMPORT_SERVER_URL_HERE
User:arcturus_user
Password:arcturus_pw
Port:3310
Databases:Separated by semicolon
For the popup: Really auto-detect file encoding? click on Yes
Extract sqlupdates.zip then go to File --> Run SQL file... Run the following SQL files in order:
arcturus_3.0.0-stable_base_database--compact.sql
3_0_0 to 3_5_0.sql
3_5_0 to 4_0_0.sql
4_0_0_pets_EN.sql
4_0_0_permissions.sql
perms_groups.sql
UPDATE emulator_settings SET `value`='http://127.0.0.1:8080/usercontent/camera/' WHERE `key`='camera.url';
UPDATE emulator_settings SET `value`='/app/assets/usercontent/camera/' WHERE `key`='imager.location.output.camera';
UPDATE emulator_settings SET `value`='/app/assets/usercontent/camera/thumbnail/' WHERE `key`='imager.location.output.thumbnail';
UPDATE emulator_settings SET `value`='http://127.0.0.1:8080/api/imageproxy/0x0/http://img.youtube.com/vi/%video%/default.jpg' WHERE `key`='imager.url.youtube';
UPDATE emulator_settings SET `value`='0' WHERE `key`='console.mode';
UPDATE emulator_settings SET `value`='/app/assets/usercontent/badgeparts/generated/' WHERE `key`='imager.location.output.badges';
UPDATE emulator_settings SET `value`='/app/assets/swf/c_images/Badgeparts' WHERE `key`='imager.location.badgeparts';
docker compose up assets -d && \
docker compose up assets-build --build && \
docker compose up imgproxy --build -d && \
docker compose up arcturus --build -d
nitro/renderer-config.json
and nitro/ui-config.json
values to your setup. If the deployment is buggy or throws any errors, check the JSON files for updates. Then build and start Nitro: docker compose up nitro --build -d
habbo-downloader requires Node.js 15.0 or higher you can install the newest version with the following command:
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash && \
nvm install node
apt install npm -y && \
npm i -g habbo-downloader && \
rm -rf assets/swf/gordon/PRODUCTION && \
habbo-downloader --output ./assets/swf --domain com --command badgeparts && \
habbo-downloader --output ./assets/swf --domain com --command badges && \
habbo-downloader --output ./assets/swf --domain com --command clothes && \
habbo-downloader --output ./assets/swf --domain com --command effects && \
habbo-downloader --output ./assets/swf --domain com --command furnitures && \
habbo-downloader --output ./assets/swf --domain com --command gamedata && \
habbo-downloader --output ./assets/swf --domain com --command gordon && \
habbo-downloader --output ./assets/swf --domain com --command hotelview && \
habbo-downloader --output ./assets/swf --domain com --command icons && \
habbo-downloader --output ./assets/swf --domain com --command mp3 && \
habbo-downloader --output ./assets/swf --domain com --command pets && \
habbo-downloader --output ./assets/swf --domain com --command promo && \
cp -n assets/swf/dcr/hof_furni/icons/* assets/swf/dcr/hof_furni && \
mv assets/swf/gordon/PRODUCTION* assets/swf/gordon/PRODUCTION && \
docker compose up assets-build --build
Replace --domain com with your own country code
For example if you want Dutch then do --domain nl
Here is a list of supported country codes:
Portuguese --domain com.br
Turkish --domain com.tr
English --domain com
German --domain de
Spanish --domain es
Finnish --domain fi
French --domain fr
Italian --domain it
Dutch --domain nl
habbo-downloader --output ./assets/translation --domain com --command gamedata && \
cd ./assets/translation && \
cp -rf gamedata/external*.txt ../swf/gamedata/ && \
cd ../.. && \
docker compose up assets-build --build && \
cd ./assets/translation && \
python FurnitureDataTranslator.py && \
python SQLGenerator.py && \
python external_text.py --domain com
docker compose restart arcturus
change .cms.env to your needs
docker compose up cms --build -d
docker compose exec cms php artisan key:generate
Open the CMS in the browser by default http://127.0.0.1:8081 and do the basic setup.
UPDATE website_settings SET `value` = 'http://127.0.0.1:8080/api/imager/?figure=' WHERE `key` = 'avatar_imager';
UPDATE website_settings SET `value` = 'http://127.0.0.1:8080/swf/c_images/album1584' WHERE `key` = 'badges_path';
UPDATE website_settings SET `value` = 'http://127.0.0.1:8080/usercontent/badgeparts/generated' WHERE `key` = 'group_badge_path';
UPDATE website_settings SET `value` = 'http://127.0.0.1:8080/swf/dcr/hof_furni' WHERE `key` = 'furniture_icons_path';
UPDATE website_settings SET `value` = 'arcturus' WHERE `key` = 'rcon_ip';
UPDATE website_settings SET `value` = '3001' WHERE `key` = 'rcon_port';
UPDATE website_settings SET `value` = '4' WHERE `key` = 'min_staff_rank';
UPDATE website_settings SET `value` = '5' WHERE `key` = 'min_maintenance_login_rank';
UPDATE website_settings SET `value` = '6' WHERE `key` = 'min_housekeeping_rank';
UPDATE emulator_settings SET `value`='*.example.com' WHERE `key`='websockets.whitelist';
Optional.
bash ./export_containers.sh
docker compose exec backup backup-now
7z a -mx=9 nitro-$(date -d "today" +"%Y%m%d_%H%M").7z ./ '-x!db/data' '-x!.git/' '-x!logs/' '-x!cache/'
🇶 Handshake Failed
Login without an SSO ticket is not supported; use http://127.0.0.1:3000?sso=123
cd /home/myusername/docker
mkdir -p gitea/data/gitea/public/assets/css && cd gitea && mkdir actrunner && cd "$_" && touch act-config.yml && cd ..
# Gitea + PostgreSQL + act_runner — docker-compose.yml
version: "3.8"
services:
  db:
    image: postgres:latest
    container_name: gitea-postgresql
    restart: unless-stopped
    environment:
      POSTGRES_DB: gitea
      POSTGRES_USER: gitea
      POSTGRES_PASSWORD: "gitea!"
    volumes:
      - ./db:/var/lib/postgresql/data
  gitea:
    image: gitea/gitea:latest
    container_name: gitea
    restart: unless-stopped
    environment:
      USER_UID: 1000
      USER_GID: 1000
      GITEA__database__DB_TYPE: postgres
      GITEA__database__HOST: db
      GITEA__database__NAME: gitea
      GITEA__database__USER: gitea
      GITEA__database__PASSWD: "gitea!"
    volumes:
      - ./data:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "8223:3000"
      # Optional for SSH access. Quoted: unquoted 8224:22 is read as a
      # base-60 integer by YAML 1.1 parsers.
      - "8224:22"
    depends_on:
      - db
  actrunner:
    image: gitea/act_runner:latest
    container_name: gitea-act-runner
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./actrunner/data:/data
      - ./actrunner/act-config.yml:/act-config.yml
    environment:
      GITEA_RUNNER_NAME: gitea-act-runner
      # Use the compose service name: "localhost" inside the runner container
      # refers to the runner itself, not to Gitea.
      GITEA_INSTANCE_URL: http://gitea:3000
      GITEA_RUNNER_REGISTRATION_TOKEN: insert_registration_token_here
    depends_on:
      - gitea
docker compose up -d
Go to: Site Administration -> Actions -> Runners
Click on: Create new Runner
Copy and paste the REGISTRATION TOKEN into docker-compose.yml at actrunner replace the insert_registration_token_here
docker compose up -d
cd /home/myusername/docker/gitea/conf
nano app.ini
Insert the following lines at the bottom:
[ui]
THEMES = github, my-custom-theme
DEFAULT_THEME = github
Download the following theme file:
Upload the file to:
cd /home/myusername/docker/gitea/public/assets/css
cd /home/myusername/docker/gitea/data/gitea
mkdir templates && cd "$_"
Download and Import the following files into the templates folder
cd /home/myusername/docker/gitea/data/gitea/public/assets/css
Download and Import the following css file into the css folder
cd /home/myusername/docker/gitea/data/gitea/templates
mkdir -p user/auth && cd "$_"
Download and Import the following files into the auth folder
link_account.tmpl
signup.tmpl
signin.tmpl
Go to: Admin interface -> Flows & Stages -> Flows
Edit default-authentication-flow
Click on Appearance settings
and upload a custom Background image
cd /home/myusername/docker/gitea/data/gitea/conf
nano app.ini
[mailer]
ENABLED = true
FROM = [email protected]
MAILER_TYPE = smtp
SMTP_ADDR = mail.DOMAIN.COM
SMTP_PORT = 587
IS_TLS_ENABLED = false
USER = [email protected]
PASSWD = REPLACE_WITH_YOUR_EMAIL_PASSWORD
Make sure to save the Client ID and Client Secret because you need it later
Make sure to save the OpenID Configuration URL because you need it later
Site Administration > Identity & Access > Authentication Sources
Add Authentication Source
Add Authentication Source
OAuth2
Authentik
OpenID Connect
PASTE_THE_CLIENT_ID_YOU_SAVED
PASTE_THE_CLIENT_SECRET_YOU_SAVED
https://authentik.DOMAIN.COM/static/dist/assets/icons/icon.svg
PASTE_THE_OPENID_CONFIGURATION_URL_YOU_SAVED
email profile
cd /home/myusername/docker
mkdir gitlab && cd "$_"
# GitLab CE + gitlab-runner — docker-compose.yml
version: "3.8"
services:
  gitlab-runner:
    image: gitlab/gitlab-runner:alpine
    container_name: gitlab-runner
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./gitlab-runner:/etc/gitlab-runner
    depends_on:
      - web
  web:
    image: gitlab/gitlab-ce:latest
    container_name: gitlab-ce
    hostname: gitlab.DOMAIN.COM
    restart: unless-stopped
    environment:
      # Ruby snippet evaluated by omnibus-gitlab at container start.
      # The literal block scalar (|) keeps one statement per line.
      GITLAB_OMNIBUS_CONFIG: |
        external_url "https://gitlab.DOMAIN.COM"
        nginx['listen_https'] = false
        nginx['redirect_http_to_https'] = false
        nginx['listen_port'] = 80
        letsencrypt['enable'] = false
        gitlab_rails['ldap_enabled'] = false
        gitlab_rails['gitlab_shell_ssh_port'] = 22
        gitlab_rails['gitlab_username_changing_enabled'] = true
        # SMTP email
        gitlab_rails['gitlab_email_enabled'] = true
        gitlab_rails['gitlab_email_from'] = "[email protected]"
        gitlab_rails['gitlab_email_reply_to'] = "[email protected]"
        gitlab_rails['smtp_enable'] = true
        gitlab_rails['smtp_address'] = "mail.DOMAIN.COM"
        gitlab_rails['smtp_port'] = 587
        gitlab_rails['smtp_user_name'] = "[email protected]"
        gitlab_rails['smtp_password'] = "passwordhere"
        gitlab_rails['smtp_domain'] = "www.DOMAIN.COM"
        gitlab_rails['smtp_authentication'] = "plain"
        gitlab_rails['smtp_enable_starttls_auto'] = true
        gitlab_rails['smtp_tls'] = false
        # Reply by email
        #gitlab_rails['incoming_email_enabled'] = true
        #gitlab_rails['incoming_email_address'] = "git+%{key}@DOMAIN.COM"
        #gitlab_rails['incoming_email_email'] = "[email protected]"
        #gitlab_rails['incoming_email_password'] = "EMAILPASSWORDHERE"
        #gitlab_rails['incoming_email_mailbox_name'] = "inbox"
        #gitlab_rails['incoming_email_idle_timeout'] = 60
        #gitlab_rails['incoming_email_host'] = "mail.DOMAIN.COM"
        #gitlab_rails['incoming_email_port'] = 587
        #gitlab_rails['incoming_email_ssl'] = true
        #gitlab_rails['incoming_email_start_tls'] = false
        # Docker registry
        #registry_external_url "https://registrygl.DOMAIN.COM"
        #gitlab_rails['registry_enabled'] = true
        #gitlab_rails['registry_api_url'] = "https://registrygl.DOMAIN.COM"
        #registry['enable'] = true
        #registry_nginx['enable'] = false
        #registry['registry_http_addr'] = "0.0.0.0:5000"
        # GitLab Pages
        #pages_external_url "https://pages.DOMAIN.COM"
        #gitlab_pages['enable'] = true
        #pages_nginx['enable'] = true
        #pages_nginx['listen_https'] = false
        #pages_nginx['redirect_http_to_https'] = true
        #pages_nginx['listen_port'] = 5100
        #pages_nginx['proxy_set_headers'] = {"X-Forwarded-Proto" => "https","X-Forwarded-Ssl" => "on"}
    volumes:
      - ./config:/etc/gitlab
      - ./logs:/var/log/gitlab
      - ./data:/var/opt/gitlab
    ports:
      - "8225:80"
      - "8226:443"
      - "5000:5000"
      - "5005:5005"
      - "5100:5100"
      - "5050:5050"
      # Quoted: unquoted 22:22 parses as a base-60 integer in YAML 1.1.
      # - "22:22"
      # - "587:587"
docker compose up -d
DEFAULT LOGIN
Username:root
Password: Run the command below to get the password
docker exec -it gitlab-ce grep 'Password:' /etc/gitlab/initial_root_password
docker exec -it gitlab-ce gitlab-rails console
Notify.test_email('[email protected]', 'Message Subject', 'Message Body').deliver_now
docker exec -it gitlab-ce gitlab-ctl reconfigure
docker exec -it gitlab-runner gitlab-runner register --url "https://gitlab.DOMAIN.COM" --token INSERTTOKENHERE --description "gitlab-docker-runner"
docker exec -it gitlab-runner gitlab-runner register --non-interactive --url "https://gitlab.DOMAIN.COM/" --token INSERTTOKENHERE --executor "docker" --docker-image alpine:latest --description "gitlab-docker-runner"
docker exec -it gitlab-runner gitlab-runner run
Put the following config into the traefik fileConfig.yml
# Goes into the traefik dynamic fileConfig.yml — merge the routers/services
# below into the existing `http:` section of that file.
http:
  routers:
    # Gitlab router
    gitlab-ce:
      entryPoints:
        # NOTE(review): this router uses "https" while the pages routers use
        # "websecure" — confirm which entrypoint name your Traefik defines.
        - https
      rule: 'Host(`gitlab.DOMAIN.COM`)'
      service: gitlab-ce
      # middlewares:
      #   - "auth"
    # Pages router
    pages:
      entryPoints:
        - websecure
      rule: 'Host(`pages.DOMAIN.COM`)'
      service: pages
      tls:
        certResolver: cloudflare
        domains:
          - main: gitlab.DOMAIN.COM
            sans:
              - '*.gitlab.DOMAIN.COM'
              - '*.pages.DOMAIN.COM'
      middlewares:
        - pages-redirectscheme
    # Pages-Wildcard router
    pages-wildcard:
      entryPoints:
        - websecure
      rule: 'HostRegexp(`pages.DOMAIN.COM`, `{sub:[a-zA-Z0-9-]+}.pages.DOMAIN.COM`)'
      service: pages-wildcard
      tls:
        certResolver: cloudflare
        domains:
          - main: gitlab.DOMAIN.COM
            sans:
              - '*.gitlab.DOMAIN.COM'
              - '*.pages.DOMAIN.COM'
      middlewares:
        - pages-wildcard-redirectscheme
  services:
    # Gitlab service
    gitlab-ce:
      loadBalancer:
        servers:
          - url: http://192.168.1.95:8225
    # Pages service
    pages:
      loadBalancer:
        passHostHeader: true
        servers:
          - url: http://127.0.0.1:5100
    # Pages-Wildcard service
    pages-wildcard:
      loadBalancer:
        passHostHeader: true
        servers:
          - url: http://127.0.0.1:5100
Put the following config into the traefik fileConfig.yml
# Goes into the traefik dynamic fileConfig.yml — merge into the existing
# `http:` section.
http:
  routers:
    # GitLab - registry router
    gitlab-registry:
      entryPoints:
        - https
      rule: 'Host(`registrygl.DOMAIN.COM`)'
      service: gitlab-registry
  services:
    # GitLab - registry service
    gitlab-registry:
      loadBalancer:
        servers:
          - url: http://192.168.1.95:5000
Advanced protocol settings
Make sure to save the Slug because you need it later for idp_sso_target_url
Go to: Admin interface > System > Certificates
Click on: authentik Self-signed Certificate
Make sure to save the Certificate Fingerprint (SHA1) because you need it later for idp_cert_fingerprint
# SAML single sign-on via authentik.
# Goes into gitlab.rb (or the GITLAB_OMNIBUS_CONFIG block of the compose file),
# then run: gitlab-ctl reconfigure.
gitlab_rails['omniauth_enabled'] = true # Set to false if you wanna disable auth like authentik
gitlab_rails['omniauth_allow_single_sign_on'] = ['saml']
gitlab_rails['omniauth_sync_email_from_provider'] = 'saml'
gitlab_rails['omniauth_sync_profile_from_provider'] = ['saml']
gitlab_rails['omniauth_sync_profile_attributes'] = ['email']
gitlab_rails['omniauth_auto_sign_in_with_provider'] = 'saml'
gitlab_rails['omniauth_block_auto_created_users'] = false
gitlab_rails['omniauth_auto_link_saml_user'] = true
gitlab_rails['omniauth_providers'] = [
{
name: 'saml',
args: {
assertion_consumer_service_url: 'https://gitlab.DOMAIN.COM/users/auth/saml/callback',
# Shown when navigating to certificates in authentik
idp_cert_fingerprint: '4E:1E:CD:67:4A:67:5A:E9:6A:D0:3C:E6:DD:7A:F2:44:2E:76:00:6A',
# The trailing path segment ("gitlab") is the provider Slug saved earlier.
idp_sso_target_url: 'https://authentik.DOMAIN.COM/application/saml/gitlab/sso/binding/redirect/',
issuer: 'https://gitlab.DOMAIN.COM',
name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent',
# Map authentik SAML attributes onto GitLab profile fields.
attribute_statements: {
email: ['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress'],
first_name: ['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name'],
nickname: ['http://schemas.goauthentik.io/2021/02/saml/username']
}
},
label: 'authentik'
}
]
cd /home/myusername/docker
mkdir changedetection && cd "$_"
# changedetection.io + headless Chrome — docker-compose.yml
version: "3.8"
services:
  changedetection:
    image: ghcr.io/dgtlmoon/changedetection.io
    container_name: changedetection
    restart: unless-stopped
    volumes:
      - ./datastore:/datastore
      # - ./static:/app/changedetectionio/static
    environment:
      PUID: 1000
      PGID: 1000
      LOGGER_LEVEL: DEBUG # Log output levels: TRACE, DEBUG(default), INFO, SUCCESS, WARNING, ERROR, CRITICAL
      # BASE_URL: https://cdetect.DOMAIN.COM # Base URL of your changedetection.io install (Added to the notification alert)
      HIDE_REFERER: "true" # Hides the `Referer` header so that monitored websites can't see the changedetection.io hostname.
      # FETCH_WORKERS: 10 # Default number of parallel/concurrent fetchers
      # Matches the `hostname:` given to the browser service below.
      PLAYWRIGHT_DRIVER_URL: ws://changedetection-chrome:3000/
    ports:
      - "8167:5000"
    # `condition:` requires the long (mapping) form of depends_on; the list
    # form with a dangling `condition:` line is invalid compose syntax.
    depends_on:
      playwright-chrome:
        condition: service_started
  playwright-chrome:
    image: dgtlmoon/sockpuppetbrowser:latest
    container_name: changedetection_chrome
    restart: unless-stopped
    hostname: changedetection-chrome
    cap_add:
      - SYS_ADMIN
    # ports:
    #   - "8168:3000"
    environment:
      SCREEN_WIDTH: 1920
      SCREEN_HEIGHT: 1024
      SCREEN_DEPTH: 16
      MAX_CONCURRENT_CHROME_PROCESSES: 10
docker compose up -d
cd /home/myusername/docker
mkdir spacebar && cd "$_"
# Spacebar server + client — docker-compose.yml
version: "3.8"
services:
  spacebar-server:
    image: ccgurley/spacebar-server:latest
    container_name: spacebar-server
    restart: unless-stopped
    environment:
      CONFIG_PATH: /spacebar-server/db/config.json
    volumes:
      - ./db:/spacebar-server/db/
    ports:
      - "8400:3001"
  spacebar-client:
    image: ccgurley/spacebar-client:latest
    container_name: spacebar-client
    restart: unless-stopped
    environment:
      # NOTE(review): these point at the public spacebar.chat instance —
      # change them to your own server's endpoints for a self-hosted setup.
      SERVER_API: https://api.spacebar.chat
      SERVER_CDN: https://cdn.spacebar.chat
      SERVER_GATEWAY: wss://gateway.spacebar.chat
    ports:
      - "8401:80"
docker compose up -d
cd /home/myusername/docker
mkdir lancache && cd "$_"
## See the "Settings" section in README.md for more details
## Set this to true if you're using a load balancer, or set it to false if you're using separate IPs for each service.
## If you're using monolithic (the default), leave this set to true
USE_GENERIC_CACHE=true
## IP addresses that the lancache monolithic instance is reachable on
## Specify one or more IPs, space separated - these will be used when resolving DNS hostnames through lancachenet-dns. Multiple IPs can improve cache priming performance for some services (e.g. Steam)
## Note: This setting only affects DNS, monolithic and sniproxy will still bind to all IPs by default
LANCACHE_IP=10.0.39.1
## IP address on the host that the DNS server should bind to
DNS_BIND_IP=10.0.39.1
## DNS Resolution for forwarded DNS lookups
UPSTREAM_DNS=8.8.8.8
## Storage path for the cached data
## Note that by default, this will be a folder relative to the docker-compose.yml file
CACHE_ROOT=./lancache
## Change this to customise the size of the disk cache (default 2000g)
## If you have more storage, you'll likely want to increase this
## The cache server will prune content on a least-recently-used basis if it
## starts approaching this limit.
## Set this to a little bit less than your actual available space
CACHE_DISK_SIZE=2000g
## Change this to allow sufficient index memory for the nginx cache manager (default 500m)
## We recommend 250m of index memory per 1TB of CACHE_DISK_SIZE
CACHE_INDEX_SIZE=500m
## Change this to limit the maximum age of cached content (default 3650d)
CACHE_MAX_AGE=3650d
## Set the timezone for the docker containers, useful for correct timestamps on logs (default Europe/London)
## Formatted as tz database names. Example: Europe/Oslo or America/Los_Angeles
TZ=Etc/UTC
# Lancache (DNS + monolithic cache) — docker-compose.yml
version: "3.8"
services:
  dns:
    image: lancachenet/lancache-dns:latest
    container_name: lancache-dns
    restart: unless-stopped
    env_file: .env
    ports:
      - "${DNS_BIND_IP}:53:53/udp" # This is provided in the .env file!
      - "${DNS_BIND_IP}:53:53/tcp" # This is provided in the .env file!
  ## HTTPS requests are now handled in monolithic directly
  ## you could choose to return to sniproxy if desired
  #
  # sniproxy:
  #   image: lancachenet/sniproxy:latest
  #   container_name: lancache-sniproxy
  #   restart: unless-stopped
  #   env_file: .env
  #   ports:
  #     - "443:443/tcp"
  monolithic:
    image: lancachenet/monolithic:latest
    container_name: lancache-monolithic
    restart: unless-stopped
    env_file: .env
    volumes:
      - ${CACHE_ROOT}/cache:/data/cache # This is provided in the .env file!
      - ${CACHE_ROOT}/logs:/data/logs # This is provided in the .env file!
    ports:
      - "80:80/tcp"
      - "443:443/tcp"
docker compose up -d
cd /home/myusername/docker
mkdir gotify && cd "$_"
# Gotify — docker-compose.yml
version: "3.8"
services:
  gotify:
    image: gotify/server
    container_name: gotify
    restart: unless-stopped
    ports:
      - "9210:80"
    environment:
      TZ: Etc/UTC
    volumes:
      - ./data:/app/data
docker compose up -d
To use watchtower with Gotify make sure to deploy the Gotify container first from above
Currently Watchtower is set to notification only. If you want Watchtower to automatically update all containers, then set WATCHTOWER_MONITOR_ONLY: false
and WATCHTOWER_NO_PULL: false
cd /home/myusername/docker
mkdir watchtower && cd "$_"
# Watchtower (notify-only via Gotify) — docker-compose.yml
version: "3.8"
services:
  watchtower:
    image: containrrr/watchtower:latest
    container_name: watchtower
    restart: unless-stopped
    environment:
      TZ: Etc/UTC
      NO_COLOR: "true"
      # Quoted: the cron expression must reach watchtower as a string.
      WATCHTOWER_SCHEDULE: "0 0 19 * * *"
      WATCHTOWER_INCLUDE_STOPPED: "true"
      WATCHTOWER_INCLUDE_RESTARTING: "true"
      WATCHTOWER_NOTIFICATIONS_HOSTNAME: watchtower
      WATCHTOWER_NOTIFICATIONS: gotify
      WATCHTOWER_NOTIFICATION_GOTIFY_URL: IMPORT_GOTIFY_URL_HERE
      WATCHTOWER_NOTIFICATION_GOTIFY_TOKEN: IMPORT_WATCHTOWER_TOKEN_HERE
      WATCHTOWER_NOTIFICATION_GOTIFY_TLS_SKIP_VERIFY: "true"
      WATCHTOWER_MONITOR_ONLY: "true" # If set to "true" then also set WATCHTOWER_NO_PULL to "true"
      WATCHTOWER_NO_PULL: "true" # Set to "true" if WATCHTOWER_MONITOR_ONLY is set to "true"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
docker compose up -d
Default login
Username:admin
Password:admin
Watchtower
WATCHTOWER_NOTIFICATION_GOTIFY_TOKEN
then replace the following:IMPORT_WATCHTOWER_TOKEN_HERE
replace with the token that you copied, then run: docker compose up -d
cd /home/myusername/docker
mkdir wud && cd "$_"
# What's up Docker (WUD) — docker-compose.yml
version: "3.8"
services:
  whatsupdocker:
    image: ghcr.io/fmartinou/whats-up-docker:latest
    container_name: wud
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "8900:3000"
docker compose up -d
cd /home/myusername/docker
mkdir ansiblesemaphore && cd "$_"
# Ansible Semaphore + MariaDB — docker-compose.yml
version: "3.8"
services:
  semaphore:
    image: semaphoreui/semaphore:latest
    container_name: semaphore
    restart: unless-stopped
    environment:
      SEMAPHORE_DB_DIALECT: mysql
      SEMAPHORE_DB_HOST: mariadb
      SEMAPHORE_DB_PORT: 3306
      SEMAPHORE_DB: semaphore
      SEMAPHORE_DB_USER: semaphore
      SEMAPHORE_DB_PASS: "semaphore!"
      SEMAPHORE_PLAYBOOK_PATH: /tmp/semaphore/
      SEMAPHORE_ADMIN_PASSWORD: semaphore
      SEMAPHORE_ADMIN_NAME: admin
      SEMAPHORE_ADMIN_EMAIL: [email protected]
      SEMAPHORE_ADMIN: admin
      SEMAPHORE_ACCESS_KEY_ENCRYPTION: "EUZi3BfINyLtUr0uZKfzGx+J14OfSTpUwLWTcks1dk0=" # To generate a random key use: head -c32 /dev/urandom | base64
      ANSIBLE_HOST_KEY_CHECKING: "false" # (optional) change to "true" if you want to enable host key checking
    user: "${UID}:${GID}"
    volumes:
      - ./inventory/:/inventory:ro
      - ./authorized-keys/:/authorized-keys:ro
      - ./config/:/etc/semaphore:rw
    ports:
      - "9300:3000"
    depends_on:
      - db
  db:
    image: mariadb:latest
    container_name: semaphore-mariadb
    hostname: mariadb
    restart: unless-stopped
    volumes:
      - ./db:/var/lib/mysql
    environment:
      MYSQL_DATABASE: semaphore
      MYSQL_USER: semaphore
      MYSQL_PASSWORD: "semaphore!"
      MYSQL_ROOT_PASSWORD: "semaphore!!"
docker compose up -d
cd /home/myusername/docker
mkdir owncast && cd "$_"
# Owncast — docker-compose.yml
version: "3.8"
services:
  owncast:
    image: owncast/owncast:latest
    container_name: owncast
    restart: unless-stopped
    volumes:
      - ./data:/app/data
    ports:
      - "8310:8080"
      - "8311:1935"
    tty: true
docker compose up -d
cd /home/myusername/docker
mkdir mealie && cd "$_"
# Mealie + PostgreSQL — docker-compose.yml
version: "3.8"
services:
  mealie:
    image: ghcr.io/mealie-recipes/mealie:nightly
    container_name: mealie
    restart: unless-stopped
    environment:
      ALLOW_SIGNUP: "true"
      PUID: 1000
      PGID: 1000
      TZ: America/Anchorage
      MAX_WORKERS: 1
      WEB_CONCURRENCY: 1
      BASE_URL: https://mealie.DOMAIN.COM
      DB_ENGINE: postgres
      POSTGRES_USER: mealie
      POSTGRES_PASSWORD: "mealie!"
      # Matches the `container_name:` of the db service below.
      POSTGRES_SERVER: postgres
      POSTGRES_PORT: 5432
      POSTGRES_DB: mealie
    ports:
      - "8228:9000"
    volumes:
      - ./data:/app/data/
    depends_on:
      - db
  db:
    image: postgres:15
    container_name: postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: mealie
      POSTGRES_PASSWORD: "mealie!"
    volumes:
      - ./db:/var/lib/postgresql/data
docker compose up -d
cd /home/myusername/docker
mkdir jenkins && cd "$_"
# Jenkins + socat docker-socket bridge — docker-compose.yml
version: "3.8"
services:
  jenkins:
    image: jenkins/jenkins:lts
    container_name: jenkins
    restart: unless-stopped
    privileged: true
    user: root
    ports:
      - "50000:50000"
      - "50001:8080"
    volumes:
      - ./data:/var/jenkins_home
      - /var/run/docker.sock:/var/run/docker.sock
  socat:
    image: alpine/socat
    container_name: socat
    restart: unless-stopped
    # Exposes the local docker socket over TCP for Jenkins docker agents.
    command: tcp-listen:2375,fork,reuseaddr unix-connect:/var/run/docker.sock
    ports:
      - "2376:2375"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
docker compose up -d
cd /home/myusername/docker
mkdir jenkins && cd "$_"
docker network create jenkins
docker run \
--name jenkins-docker \
--rm \
--detach \
--privileged \
--network jenkins \
--network-alias docker \
--env DOCKER_TLS_CERTDIR=/certs \
--volume ./client-certs:/certs/client \
--volume ./data:/var/jenkins_home \
--publish 2376:2376 \
docker:dind \
--storage-driver overlay2
nano dockerfile
# Use the base Jenkins image
FROM jenkins/jenkins:jdk21

# Switch to the root user to install packages
USER root

# Install lsb-release (needed to resolve the Debian codename below)
RUN apt-get update && apt-get install -y lsb-release

# Add Docker GPG key
RUN curl -fsSLo /usr/share/keyrings/docker-archive-keyring.asc \
    https://download.docker.com/linux/debian/gpg

# Add Docker repository
RUN echo "deb [arch=$(dpkg --print-architecture) \
    signed-by=/usr/share/keyrings/docker-archive-keyring.asc] \
    https://download.docker.com/linux/debian \
    $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list

# Install Docker CLI and clean the apt lists to keep the image small
RUN apt-get update && apt-get install -y docker-ce-cli \
    && rm -rf /var/lib/apt/lists/*

# Switch back to the Jenkins user
USER jenkins

# Install Jenkins plugins using jenkins-plugin-cli
RUN jenkins-plugin-cli --plugins "blueocean docker-workflow"
docker build -t jenkins-blueocean:jdk21 .
# Jenkins Blue Ocean (custom image built above) — docker-compose.yml
version: "3.8"
services:
  jenkins-blueocean:
    image: jenkins-blueocean:jdk21
    container_name: jenkins-blueocean
    restart: unless-stopped
    privileged: true
    user: root
    environment:
      # Talks to the docker:dind container started earlier (alias "docker").
      DOCKER_HOST: tcp://docker:2376
      DOCKER_CERT_PATH: /certs/client
      DOCKER_TLS_VERIFY: 1
    ports:
      - "50000:50000"
      - "50001:8080"
    volumes:
      - ./data:/var/jenkins_home
      - ./client-certs:/certs/client:ro
      - /var/run/docker.sock:/var/run/docker.sock
docker compose up -d
cd /home/myusername/docker
git clone https://github.com/SillyTavern/SillyTavern.git && cd SillyTavern
docker build -t sillytavern:release . && cd docker
# SillyTavern (image built from the cloned repo) — docker-compose.yml
version: "3.8"
services:
  sillytavern:
    build: ..
    image: sillytavern:release
    container_name: sillytavern
    restart: unless-stopped
    hostname: sillytavern
    ports:
      - "8123:8000"
    volumes:
      - ./config:/home/node/app/config
      - ./user:/home/node/app/public/user
docker compose up -d
cd /home/myusername/docker
mkdir immich && cd "$_"
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# Connection secrets for postgres and typesense. You should change these to random passwords
TYPESENSE_API_KEY=;B^`65fTeX6c%XbaWEH
DB_PASSWORD=immich!
# DO NOT TOUCH THE LINES BELOW
###################################################################################
DB_HOSTNAME=immich_postgres
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
REDIS_HOSTNAME=immich_redis
# Immich — docker-compose.yml
services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:release
    restart: unless-stopped
    volumes:
      - ./library:/usr/src/app/upload
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - "8295:2283"
    depends_on:
      - redis
      - database
    healthcheck:
      disable: false
    networks:
      - production
  immich-machine-learning:
    image: ghcr.io/immich-app/immich-machine-learning:release
    container_name: immich_machine_learning
    restart: unless-stopped
    # For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - ./model-cache:/cache
    env_file:
      - .env
    healthcheck:
      disable: false
    networks:
      - production
  redis:
    image: docker.io/redis:6.2-alpine@sha256:2ba50e1ac3a0ea17b736ce9db2b0a9f6f8b85d4c27d5f5accc6a416d8f42c6d5
    container_name: immich_redis
    restart: unless-stopped
    healthcheck:
      test: redis-cli ping || exit 1
    # Must share a network with immich-server (which resolves it via .env
    # REDIS_HOSTNAME); the original left redis off the "production" network.
    networks:
      - production
  database:
    container_name: immich_postgres
    image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
    env_file:
      - .env
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
    volumes:
      - ./db:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
      interval: 5m
      start_interval: 30s
      start_period: 5m
    command:
      [
        'postgres',
        '-c',
        'shared_preload_libraries=vectors.so',
        '-c',
        'search_path="$$user", public, vectors',
        '-c',
        'logging_collector=on',
        '-c',
        'max_wal_size=2GB',
        '-c',
        'shared_buffers=512MB',
        '-c',
        'wal_compression=on',
      ]
    # Same fix as redis: database must be reachable from immich-server.
    networks:
      - production
# Declared here because the services above reference it; the original
# referenced "production" without ever defining it.
networks:
  production: {}
docker compose up -d
cd /home/myusername/docker
mkdir it_tools && cd "$_"
# docker-compose.yml — IT-Tools (static web app, no persistent state).
version: "3.8"

services:
  it_tools:
    image: ghcr.io/corentinth/it-tools:latest
    container_name: it_tools
    restart: unless-stopped
    ports:
      - "8270:80"  # host 8270 -> container 80
docker compose up -d
cd /home/myusername/docker
mkdir paperless && cd "$_"
# docker-compose.yml — Paperless-ngx with Redis broker, Postgres, Gotenberg and
# Tika, all on the pre-existing external "production" network.
version: "3.8"

services:
  broker:
    image: docker.io/library/redis:latest
    container_name: paperless_redis
    restart: unless-stopped
    volumes:
      - ./redis:/data
    networks:
      - production

  db:
    image: docker.io/library/postgres:latest
    container_name: paperless_postgres
    restart: unless-stopped
    volumes:
      - ./db:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: paperless
      POSTGRES_USER: paperless
      POSTGRES_PASSWORD: paperless  # NOTE(review): placeholder credential — change before deploying
    networks:
      - production

  webserver:
    image: ghcr.io/paperless-ngx/paperless-ngx:latest
    container_name: paperless
    restart: unless-stopped
    depends_on:
      - db
      - broker
      - gotenberg
      - tika
    ports:
      - "8280:8000"
    volumes:
      - ./data:/usr/src/paperless/data
      - ./media:/usr/src/paperless/media
      - ./export:/usr/src/paperless/export
      - ./consume:/usr/src/paperless/consume
    env_file: .env
    environment:
      PAPERLESS_REDIS: redis://broker:6379
      PAPERLESS_DBHOST: db
      PAPERLESS_TIKA_ENABLED: "1"  # quoted so the env value stays the string "1"
      PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://gotenberg:3000
      PAPERLESS_TIKA_ENDPOINT: http://tika:9998
      PAPERLESS_URL: https://paperless.DOMAIN.COM
      PAPERLESS_ADMIN_USER: paperless
      PAPERLESS_ADMIN_PASSWORD: paperless  # NOTE(review): placeholder credential — change before deploying
      PAPERLESS_APPS: "allauth.socialaccount.providers.openid_connect"
      # Fill in the three IMPORT_*_HERE placeholders from your Authentik provider (see below).
      PAPERLESS_SOCIALACCOUNT_PROVIDERS: '{"openid_connect": {"APPS": [{"provider_id": "authentik","name": "Authentik SSO","client_id": "IMPORT_CLIENT_ID_HERE","secret": "IMPORT_SECRET_HERE","settings": { "server_url": "IMPORT_SERVER_URL_HERE"}}]}}'
    networks:
      - production

  gotenberg:
    image: docker.io/gotenberg/gotenberg:latest
    container_name: paperless_gotenberg
    restart: unless-stopped
    # The gotenberg chromium route is used to convert .eml files. We do not
    # want to allow external content like tracking pixels or even javascript.
    command:
      - "gotenberg"
      - "--chromium-disable-javascript=true"
      - "--chromium-allow-list=file:///tmp/.*"
    networks:
      - production

  tika:
    image: ghcr.io/paperless-ngx/tika:latest
    container_name: paperless_tika
    restart: unless-stopped
    networks:
      - production

networks:
  production:
    external: true
docker compose up -d
Paperless
default-authentication-flow (Welcome to authentik!)
default-provider-authorization-explicit-consent (Authorize Application)
Make sure to save the Client ID and Client Secret because you need it later for PAPERLESS_SOCIALACCOUNT_PROVIDERS in docker-compose.yml
Paperless
paperless-ngx
Paperless
Make sure to save the OpenID Configuration URL because you need it later for PAPERLESS_SOCIALACCOUNT_PROVIDERS in docker-compose.yml
In PAPERLESS_SOCIALACCOUNT_PROVIDERS, replace the following placeholders:
- IMPORT_CLIENT_ID_HERE — replace with the Client ID
- IMPORT_SECRET_HERE — replace with the Client Secret
- IMPORT_SERVER_URL_HERE — replace with the OpenID Configuration URL
cd /home/myusername/docker
mkdir azuracast && cd "$_"
# docker-compose.yml — AzuraCast all-in-one image (MySQL and Redis run inside
# the container, hence the "localhost" hosts below).
services:
  azuracast:
    image: ghcr.io/azuracast/azuracast:latest
    container_name: azuracast
    restart: unless-stopped
    environment:
      PUID: 1000
      PGID: 1000
      APPLICATION_ENV: production  # Valid options: production, development, testing
      COMPOSER_PLUGIN_MODE: "false"   # quoted: env values are strings, not YAML booleans
      SHOW_DETAILED_ERRORS: "false"
      AUTO_ASSIGN_PORT_MIN: 8500
      AUTO_ASSIGN_PORT_MAX: 8600
      # Database Configuration
      MYSQL_DATABASE: azuracast
      MYSQL_USER: azuracast
      MYSQL_PASSWORD: azuracast!      # NOTE(review): placeholder credential — change before deploying
      MYSQL_ROOT_PASSWORD: azuracast!!
      MYSQL_HOST: localhost
      MYSQL_PORT: 3306
      MYSQL_SLOW_QUERY_LOG: 0
      MYSQL_MAX_CONNECTIONS: 100
      MYSQL_INNODB_BUFFER_POOL_SIZE: 128M
      MYSQL_INNODB_LOG_FILE_SIZE: 16M
      # Redis Configuration
      ENABLE_REDIS: "true"
      REDIS_HOST: localhost
      REDIS_PORT: 6379
      REDIS_DB: 1
      # Advanced Configuration
    ports:
      - "9150:80"
      # - "9151:443" # optional
      # - "9152:2022" # SFTP optional
      - "11500-11600:8500-8600" # Ports to assign radio stations https://www.azuracast.com/docs/administration/docker/#using-non-standard-ports
    volumes:
      - ./stations:/var/azuracast/stations
      - ./backups:/var/azuracast/backups
      - ./db:/var/lib/mysql
      - ./storage/uploads:/var/azuracast/storage/uploads
      - ./storage/shoutcast2:/var/azuracast/storage/shoutcast2
      - ./storage/stereo_tool:/var/azuracast/storage/stereo_tool
      - ./storage/geoip:/var/azuracast/storage/geoip
      - ./storage/sftpgo_data:/var/azuracast/storage/sftpgo
      - ./storage/acme:/var/azuracast/storage/acme
    ulimits:
      nofile:
        soft: 65536
        hard: 65536
    logging:
      options:
        max-size: "1m"
        max-file: "5"
docker compose up -d
cd /home/myusername/docker
mkdir code-server && cd "$_"
# docker-compose.yml — code-server (VS Code in the browser, linuxserver.io image).
services:
  code-server:
    image: lscr.io/linuxserver/code-server:latest
    container_name: code-server
    restart: unless-stopped
    environment:
      PUID: 1000
      PGID: 1000
      TZ: Etc/UTC
      # PASSWORD: #optional
      # HASHED_PASSWORD: #optional
      # SUDO_PASSWORD: password #optional
      # SUDO_PASSWORD_HASH: #optional
      # PROXY_DOMAIN: vscode.DOMAIN.COM #optional
      DEFAULT_WORKSPACE: /config/workspace  # optional
    volumes:
      - ./config:/config
    ports:
      - "8350:8443"