first commit
commit abc31a6069

22 LICENSE Normal file
@@ -0,0 +1,22 @@
MIT License

Copyright (c) 2020 ej52

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

39 lxc/nginx-proxy-manager/README.md Normal file
@@ -0,0 +1,39 @@
# Nginx Proxy Manager in Proxmox LXC container

Many benefits can be gained by using an LXC container compared to a VM: an LXC container needs fewer resources than a VM, the resources assigned to it can be changed without rebooting the container, and serial devices connected to Proxmox can be shared with multiple LXC containers simultaneously.

## Usage

***Note:*** _Before using this repo, make sure Proxmox is up to date._

To create a new LXC container on Proxmox and set up Nginx Proxy Manager inside of it, run the following in an SSH session or the Proxmox web shell.

```bash
curl -sL https://raw.githubusercontent.com/ej52/proxmox/main/lxc/nginx-proxy-manager/create.sh | bash -s
```

### Command line arguments

| argument   | default   | description                         |
|------------|-----------|-------------------------------------|
| --id       | $nextid   | container id                        |
| --bridge   | vmbr0     | bridge used for eth0                |
| --cores    | 1         | number of cpu cores                 |
| --disksize | 2G        | size of disk                        |
| --hostname | npm       | hostname of the container           |
| --memory   | 512       | amount of memory (MB)               |
| --storage  | local-lvm | storage location for container disk |
| --swap     | 0         | amount of swap (MB)                 |

You can set these parameters by appending ` -- <parameter> <value>`, for example:

```bash
curl -sL https://raw.githubusercontent.com/ej52/proxmox/main/lxc/nginx-proxy-manager/create.sh | bash -s -- --cores 4
```
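
Multiple arguments can be combined in the same way. For instance (an illustrative combination of the options listed above, with placeholder values):

```bash
curl -sL https://raw.githubusercontent.com/ej52/proxmox/main/lxc/nginx-proxy-manager/create.sh | bash -s -- --hostname proxy --cores 2 --memory 1024 --disksize 4G
```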

## Console

There is no login required to access the console from the Proxmox web UI. If you are presented with a blank screen, press `CTRL + C` to generate a prompt.

## Thanks

- [whiskerz007](https://github.com/whiskerz007?tab=repositories)

206 lxc/nginx-proxy-manager/create.sh Normal file
@@ -0,0 +1,206 @@
#!/usr/bin/env bash

set -Eeuo pipefail

trap error ERR
trap 'popd >/dev/null; rm -rf $_temp_dir;' EXIT

function info { echo -e "\e[32m[info] $*\e[39m"; }
function warn { echo -e "\e[33m[warn] $*\e[39m"; }
function error {
  trap - ERR

  if [ -z "${1-}" ]; then
    echo -e "\e[31m[error] $(caller): ${BASH_COMMAND}\e[39m"
  else
    echo -e "\e[31m[error] $1\e[39m"
  fi

  if [ -n "${_ctid-}" ]; then
    if [ -n "${_mount+x}" ]; then
      pct unmount $_ctid &>/dev/null
    fi
    if pct status $_ctid &>/dev/null; then
      if [ "$(pct status $_ctid 2>/dev/null | awk '{print $2}')" == "running" ]; then
        pct stop $_ctid &>/dev/null
      fi
      pct destroy $_ctid &>/dev/null
    elif [ "$(pvesm list $_storage --vmid $_ctid 2>/dev/null | awk 'FNR == 2 {print $2}')" != "" ]; then
      pvesm free $_rootfs &>/dev/null
    fi
  fi

  exit 1
}

# Base raw github URL
_raw_base="https://raw.githubusercontent.com/ej52/proxmox/main/lxc/nginx-proxy-manager"
# Operating system
_os_type=alpine
_os_version=3.12
# System architecture
_arch=$(dpkg --print-architecture)

# Create temp working directory
_temp_dir=$(mktemp -d)
pushd $_temp_dir >/dev/null

# Parse command line parameters
while [[ $# -gt 0 ]]; do
  arg="$1"

  case $arg in
    --id)
      _ctid=$2
      shift
      ;;
    --bridge)
      _bridge=$2
      shift
      ;;
    --cores)
      _cpu_cores=$2
      shift
      ;;
    --disksize)
      _disk_size=$2
      shift
      ;;
    --hostname)
      _host_name=$2
      shift
      ;;
    --memory)
      _memory=$2
      shift
      ;;
    --storage)
      _storage=$2
      shift
      ;;
    --swap)
      _swap=$2
      shift
      ;;
    *)
      error "Unrecognized option $1"
      ;;
  esac
  shift
done

# Check user settings or set defaults
_ctid=${_ctid:-$(pvesh get /cluster/nextid)}
_cpu_cores=${_cpu_cores:-1}
_disk_size=${_disk_size:-2G}
_host_name=${_host_name:-npm}
_bridge=${_bridge:-vmbr0}
_memory=${_memory:-512}
_swap=${_swap:-0}
_storage=${_storage:-local-lvm}

# Test if ID is in use
if pct status $_ctid &>/dev/null; then
  warn "ID '$_ctid' is already in use."
  unset _ctid
  error "Cannot use an ID that is already in use."
fi

echo ""
warn "Container will be created using the following settings."
warn ""
warn "ctid: $_ctid"
warn "hostname: $_host_name"
warn "cores: $_cpu_cores"
warn "memory: $_memory"
warn "swap: $_swap"
warn "disksize: $_disk_size"
warn "bridge: $_bridge"
warn "storage: $_storage"
warn ""
warn "If you want to abort, hit ctrl+c within 10 seconds..."
echo ""

sleep 10

# Download latest Alpine LXC template
info "Updating LXC template list..."
pveam update &>/dev/null

info "Downloading LXC template..."
mapfile -t _templates < <(pveam available -section system | sed -n "s/.*\($_os_type-$_os_version.*\)/\1/p" | sort -t - -k 2 -V)
[ ${#_templates[@]} -eq 0 ] \
  && error "No LXC template found for $_os_type-$_os_version"

_template="${_templates[-1]}"
pveam download local $_template &>/dev/null \
  || error "A problem occurred while downloading the LXC template."

# Create variables for container disk
_storage_type=$(pvesm status -storage $_storage 2>/dev/null | awk 'NR>1 {print $2}')
case $_storage_type in
  dir|nfs)
    _disk_ext=".raw"
    _disk_ref="$_ctid/"
    ;;
  zfspool)
    _disk_prefix="subvol"
    _disk_format="subvol"
    ;;
esac
_disk=${_disk_prefix:-vm}-${_ctid}-disk-0${_disk_ext-}
_rootfs=${_storage}:${_disk_ref-}${_disk}
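# Illustrative examples (hypothetical values, for reference): with ctid 100 on the default
# local-lvm storage this resolves to a volume id like "local-lvm:vm-100-disk-0"; on a
# "dir" type storage named "local" it would look roughly like "local:100/vm-100-disk-0.raw".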

# Create LXC
info "Allocating storage for LXC container..."
pvesm alloc $_storage $_ctid $_disk $_disk_size --format ${_disk_format:-raw} &>/dev/null \
  || error "A problem occurred while allocating storage."

if [ "$_storage_type" = "zfspool" ]; then
  warn "Some containers may not work properly due to ZFS not supporting 'fallocate'."
else
  mkfs.ext4 $(pvesm path $_rootfs) &>/dev/null
fi

info "Creating LXC container..."
_pct_options=(
  -arch $_arch
  -cmode shell
  -hostname $_host_name
  -cores $_cpu_cores
  -memory $_memory
  -net0 name=eth0,bridge=$_bridge,ip=dhcp
  -onboot 1
  -ostype $_os_type
  -rootfs $_rootfs,size=$_disk_size
  -storage $_storage
  -swap $_swap
  -unprivileged 1
  -tags npm
)
pct create $_ctid "local:vztmpl/$_template" ${_pct_options[@]} &>/dev/null \
  || error "A problem occurred while creating the LXC container."

# Set container timezone to match host
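# (The mount hook below runs on the Proxmox host each time the container starts and points the
# container's /etc/localtime at the same zoneinfo target as the host's /etc/localtime.)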
cat << 'EOF' >> /etc/pve/lxc/${_ctid}.conf
lxc.hook.mount: sh -c 'ln -fs $(readlink /etc/localtime) ${LXC_ROOTFS_MOUNT}/etc/localtime'
EOF

# Setup container
info "Setting up LXC container..."
pct start $_ctid
pct exec $_ctid -- sh -c "wget --no-cache -qO - $_raw_base/setup.sh | sh"

info "Rebooting LXC container..."
pct reboot $_ctid

# Get network details and show completion message
_ip=$(pct exec $_ctid -- ip a s dev eth0 | sed -n '/inet / s/\// /p' | awk '{print $2}')
info "Successfully created Nginx Proxy Manager LXC $_ctid."
echo -e "

Nginx Proxy Manager is reachable by going to the following URL.

http://${_ip}:81

"

200 lxc/nginx-proxy-manager/setup.sh Normal file
@@ -0,0 +1,200 @@
#!/usr/bin/env sh
set -e

# Helpers
info() { echo -e "\e[32m[info] $*\e[39m"; }

_temp_dir=$(mktemp -d)
cd $_temp_dir

_version_alpine=${_version_alpine:-3.12}
_version_npm=${_version_npm:-2.6.1}

# add openresty repo
if [ ! -f /etc/apk/keys/admin@openresty.com-5ea678a6.rsa.pub ]; then
  wget -q -P /etc/apk/keys/ 'http://openresty.org/package/admin@openresty.com-5ea678a6.rsa.pub' >/dev/null 2>&1
  echo "http://openresty.org/package/alpine/v$_version_alpine/main" >> /etc/apk/repositories
fi

# Update container OS
info "Updating container OS..."
apk update >/dev/null
apk upgrade >/dev/null 2>&1

echo "fs.file-max = 65535" > /etc/sysctl.conf

# Install prerequisites
info "Installing prerequisites..."
apk add python3 git certbot jq openresty nodejs npm yarn openssl >/dev/null 2>&1
python3 -m ensurepip >/dev/null 2>&1

if [ -f /etc/init.d/npm ]; then
  info "Stopping services..."
  rc-service npm stop >/dev/null 2>&1
  rc-service openresty stop >/dev/null 2>&1
  sleep 2

  info "Cleaning old files..."
  # Cleanup for new install
  rm -rf /app \
    /var/www/html \
    /etc/nginx \
    /var/log/nginx \
    /var/lib/nginx \
    /var/cache/nginx >/dev/null 2>&1
fi

# Download nginx-proxy-manager source
info "Downloading NPM v$_version_npm..."
wget -qc https://github.com/jc21/nginx-proxy-manager/archive/v$_version_npm.tar.gz -O - | tar -xz

cd nginx-proxy-manager-$_version_npm

# Copy runtime files
_rootfs=docker/rootfs
mkdir -p /var/www/html && cp -r $_rootfs/var/www/html/* /var/www/html
mkdir -p /etc/nginx/logs && cp -r $_rootfs/etc/nginx/* /etc/nginx
rm -f /etc/nginx/conf.d/dev.conf
cp $_rootfs/etc/letsencrypt.ini /etc/letsencrypt.ini

# Update NPM version in package.json files
echo "$(jq --arg _version_npm $_version_npm '.version=$_version_npm' backend/package.json)" > backend/package.json
echo "$(jq --arg _version_npm $_version_npm '.version=$_version_npm' frontend/package.json)" > frontend/package.json

# Create required folders
mkdir -p /tmp/nginx/body \
  /run/nginx \
  /var/log/nginx \
  /data/nginx \
  /data/custom_ssl \
  /data/logs \
  /data/access \
  /data/nginx/default_host \
  /data/nginx/default_www \
  /data/nginx/proxy_host \
  /data/nginx/redirection_host \
  /data/nginx/stream \
  /data/nginx/dead_host \
  /data/nginx/temp \
  /var/lib/nginx/cache/public \
  /var/lib/nginx/cache/private \
  /var/cache/nginx/proxy_temp

touch /var/log/nginx/error.log && chmod 777 /var/log/nginx/error.log && chmod -R 777 /var/cache/nginx
chown root /tmp/nginx

# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
# thanks @tfmm
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" > /etc/nginx/conf.d/include/resolvers.conf
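# For illustration (hypothetical nameservers): if /etc/resolv.conf lists "nameserver 192.168.1.1"
# and "nameserver fd00::1", the generated resolvers.conf will contain roughly:
#   resolver 192.168.1.1 [fd00::1] ;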

# Generate dummy self-signed certificate.
if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]
then
  echo "Generating dummy SSL certificate..."
  openssl req \
    -new \
    -newkey rsa:2048 \
    -days 3650 \
    -nodes \
    -x509 \
    -subj '/O=Nginx Proxy Manager/OU=Dummy Certificate/CN=localhost' \
    -keyout /data/nginx/dummykey.pem \
    -out /data/nginx/dummycert.pem
fi

# Copy app files
mkdir -p /app/global
cp -r backend/* /app
cp -r global/* /app/global

# Build the frontend
info "Building frontend..."
mkdir -p /app/frontend/images
cd frontend
yarn install >/dev/null 2>&1
yarn build >/dev/null 2>&1
cp -r dist/* /app/frontend
cp -r app-images/* /app/frontend/images

cd /app
info "Initializing backend..."
rm -rf /app/config/default.json >/dev/null 2>&1
if [ ! -f /app/config/production.json ]; then
  cat << 'EOF' > /app/config/production.json
{
  "database": {
    "engine": "knex-native",
    "knex": {
      "client": "sqlite3",
      "connection": {
        "filename": "/data/database.sqlite"
      }
    }
  }
}
EOF
fi
yarn install >/dev/null 2>&1

# Create required folders
mkdir -p /data

# Update openresty config
info "Configuring openresty..."
cat << 'EOF' > /etc/conf.d/openresty
# Configuration for /etc/init.d/openresty

cfgfile=/etc/nginx/nginx.conf
app_prefix=/etc/nginx
EOF
rc-update add openresty boot >/dev/null 2>&1
rc-service openresty restart >/dev/null 2>&1

[ -f /usr/sbin/nginx ] && rm /usr/sbin/nginx
ln -s /usr/local/openresty/nginx/sbin/nginx /usr/sbin/nginx

# Create NPM service
info "Creating NPM service..."
cat << 'EOF' > /etc/init.d/npm
#!/sbin/openrc-run
description="Nginx Proxy Manager"

command="/usr/bin/node"
command_args="index.js --abort_on_uncaught_exception --max_old_space_size=250"
command_background="yes"
directory="/app"

pidfile="/var/run/npm.pid"
output_log="/var/log/npm.log"
error_log="/var/log/npm.err"

depend() {
  before openresty
}

start_pre() {
  mkdir -p /tmp/nginx/body \
    /data/letsencrypt-acme-challenge \
    /var/log/npm

  export NODE_ENV=production
}

stop() {
  pkill -9 -f node
  return 0
}

restart() {
  $0 stop
  $0 start
}
EOF
chmod a+x /etc/init.d/npm
rc-update add npm boot >/dev/null 2>&1
rc-service npm start >/dev/null 2>&1
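# For reference, the service can later be managed from inside the container with standard
# OpenRC commands, e.g.:
#   rc-service npm status
#   rc-service npm restart
#   rc-service openresty restart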

# Cleanup
info "Cleaning up..."
rm -rf $_temp_dir/nginx-proxy-manager-${_version_npm} >/dev/null 2>&1
apk del git jq npm >/dev/null 2>&1