
Remove nanobox (#17881)

Co-authored-by: Tony Jiang <tjiang@hippo.com>
Tony Jiang authored 2022-08-13 06:28:16 -07:00, committed by GitHub
parent 7ccf7a73f1
commit 03f19b8cd3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 0 additions and 707 deletions

.env.nanobox
@@ -1,254 +0,0 @@
# Service dependencies
# You may set REDIS_URL instead for more advanced options
REDIS_HOST=$DATA_REDIS_HOST
REDIS_PORT=6379
# REDIS_DB=0
# You may set DATABASE_URL instead for more advanced options
DB_HOST=$DATA_DB_HOST
DB_USER=$DATA_DB_USER
DB_NAME=gonano
DB_PASS=$DATA_DB_PASS
DB_PORT=5432
# DATABASE_URL=postgresql://$DATA_DB_USER:$DATA_DB_PASS@$DATA_DB_HOST/gonano
# Optional Elasticsearch configuration
ES_ENABLED=true
ES_HOST=$DATA_ELASTIC_HOST
ES_PORT=9200
BIND=0.0.0.0
# Federation
# Note: Changing LOCAL_DOMAIN at a later time will cause unwanted side effects, including breaking all existing federation.
# LOCAL_DOMAIN should *NOT* contain the protocol part of the domain, e.g. https://example.com.
LOCAL_DOMAIN=${APP_NAME}.nanoapp.io
# Changing LOCAL_HTTPS in production is no longer supported. (Mastodon will always serve https:// links)
# Use this only if you need to run mastodon on a different domain than the one used for federation.
# You can read more about this option on https://github.com/tootsuite/documentation/blob/master/Running-Mastodon/Serving_a_different_domain.md
# DO *NOT* USE THIS UNLESS YOU KNOW *EXACTLY* WHAT YOU ARE DOING.
# WEB_DOMAIN=mastodon.example.com
# Use this if you want to have several aliases (handler@example1.com,
# handler@example2.com, etc.) for the same user. LOCAL_DOMAIN should not
# be included. Comma-separated values.
# ALTERNATE_DOMAINS=example1.com,example2.com
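# Illustrative split-domain sketch (values below are invented, not part of this file):
#   LOCAL_DOMAIN=example.com         # what identities look like: @alice@example.com
#   WEB_DOMAIN=mastodon.example.com  # where the web interface is actually served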
# Application secrets
# Generate each with the `rake secret` task (`nanobox run bundle exec rake secret`)
SECRET_KEY_BASE=$SECRET_KEY_BASE
OTP_SECRET=$OTP_SECRET
# VAPID keys (used for push notifications)
# You can generate the keys using the following command (first is the private key, second is the public one)
# You should only generate this once per instance. If you later decide to change it, all push subscriptions will
# be invalidated, requiring users to access the website again to resubscribe.
#
# Generate with `rake mastodon:webpush:generate_vapid_key` task (`nanobox run bundle exec rake mastodon:webpush:generate_vapid_key`)
#
# For more information visit https://rossta.net/blog/using-the-web-push-api-with-vapid.html
VAPID_PRIVATE_KEY=$VAPID_PRIVATE_KEY
VAPID_PUBLIC_KEY=$VAPID_PUBLIC_KEY
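# Sketch of the one-time generation run described above (key values are invented):
#   $ nanobox run bundle exec rake mastodon:webpush:generate_vapid_key
#   VAPID_PRIVATE_KEY=aBcD...
#   VAPID_PUBLIC_KEY=eFgH...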
# Registrations
# Single user mode will disable registrations and redirect the frontpage to the first profile
# SINGLE_USER_MODE=true
# Prevent registrations with the following e-mail domains
# EMAIL_DOMAIN_BLACKLIST=example1.com|example2.de|etc
# Only allow registrations with the following e-mail domains
# EMAIL_DOMAIN_WHITELIST=example1.com|example2.de|etc
# Optionally change default language
# DEFAULT_LOCALE=de
# E-mail configuration
# Note: Mailgun and SparkPost (https://sparkpo.st/smtp) each have good free tiers
# If you want to use an SMTP server without authentication (e.g. a local Postfix relay),
# set SMTP_AUTH_METHOD and SMTP_OPENSSL_VERIFY_MODE to 'none' and
# *comment out* SMTP_LOGIN and SMTP_PASSWORD (leaving them blank is not enough).
SMTP_SERVER=$SMTP_SERVER
SMTP_PORT=587
SMTP_LOGIN=$SMTP_LOGIN
SMTP_PASSWORD=$SMTP_PASSWORD
SMTP_FROM_ADDRESS=notifications@${APP_NAME}.nanoapp.io
#SMTP_REPLY_TO=
#SMTP_DOMAIN= # defaults to LOCAL_DOMAIN
#SMTP_DELIVERY_METHOD=smtp # delivery method can also be sendmail
#SMTP_AUTH_METHOD=plain
#SMTP_CA_FILE=/etc/ssl/certs/ca-certificates.crt
#SMTP_OPENSSL_VERIFY_MODE=peer
#SMTP_ENABLE_STARTTLS_AUTO=true
#SMTP_TLS=true
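# Sketch of the unauthenticated-relay variant described above (relay host is invented):
#   SMTP_SERVER=localhost
#   SMTP_PORT=25
#   SMTP_AUTH_METHOD=none
#   SMTP_OPENSSL_VERIFY_MODE=none
#   # SMTP_LOGIN and SMTP_PASSWORD stay commented out entirely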
# Optional user upload path and URL (images, avatars). Default is :rails_root/public/system. If you set this variable, you are responsible for making your HTTP server (e.g. nginx) serve these files.
# PAPERCLIP_ROOT_PATH=/var/lib/mastodon/public-system
# PAPERCLIP_ROOT_URL=/system
# Optional asset host for multi-server setups
# The asset host must allow cross-origin requests from WEB_DOMAIN or LOCAL_DOMAIN
# if WEB_DOMAIN is not set. For example, the server may have the
# following header field:
# Access-Control-Allow-Origin: https://example.com/
# CDN_HOST=https://assets.example.com
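# One way to check the CORS header on the asset host (URLs are invented):
#   curl -sI -H 'Origin: https://example.com' https://assets.example.com/packs/app.js \
#     | grep -i '^access-control-allow-origin'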
# S3 (optional)
# The attachment host must allow cross-origin requests from WEB_DOMAIN or
# LOCAL_DOMAIN if WEB_DOMAIN is not set. For example, the server may have the
# following header field:
# Access-Control-Allow-Origin: https://192.168.1.123:9000/
# S3_ENABLED=true
# S3_BUCKET=
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# S3_REGION=
# S3_PROTOCOL=http
# S3_HOSTNAME=192.168.1.123:9000
# Minio (optional; S3-compatible. Check your Minio instance for details.)
# The attachment host must allow cross-origin requests - see the description
# above.
# S3_ENABLED=true
# S3_BUCKET=
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# S3_REGION=
# S3_PROTOCOL=https
# S3_HOSTNAME=
# S3_ENDPOINT=
# S3_SIGNATURE_VERSION=
# Google Cloud Storage (optional)
# Uses the S3-compatible API. Since GCS does not support Multipart Upload,
# increase the value of S3_MULTIPART_THRESHOLD to disable Multipart Upload.
# The attachment host must allow cross-origin requests - see the description
# above.
# S3_ENABLED=true
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# S3_REGION=
# S3_PROTOCOL=https
# S3_HOSTNAME=storage.googleapis.com
# S3_ENDPOINT=https://storage.googleapis.com
# S3_MULTIPART_THRESHOLD=52428801 # 50.megabytes
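# Worked out: 50 megabytes = 50 * 1024 * 1024 = 52428800 bytes, so 52428801 keeps
# any upload of up to 50MB in a single part, which is what GCS requires.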
# Swift (optional)
# The attachment host must allow cross-origin requests - see the description
# above.
# SWIFT_ENABLED=true
# SWIFT_USERNAME=
# For Keystone V3, the value for SWIFT_TENANT should be the project name
# SWIFT_TENANT=
# SWIFT_PASSWORD=
# Some OpenStack V3 providers require PROJECT_ID (optional)
# SWIFT_PROJECT_ID=
# Keystone V2 and V3 URLs are supported. Use a V3 URL if possible to avoid
# issues with token rate-limiting during high load.
# SWIFT_AUTH_URL=
# SWIFT_CONTAINER=
# SWIFT_OBJECT_URL=
# SWIFT_REGION=
# Defaults to 'default'
# SWIFT_DOMAIN_NAME=
# Defaults to 60 seconds. Set to 0 to disable
# SWIFT_CACHE_TTL=
# Optional alias for S3 (e.g. to serve files on a custom domain, possibly using Cloudfront or Cloudflare)
# S3_ALIAS_HOST=
# Streaming API integration
# STREAMING_API_BASE_URL=
# Advanced settings
# If you need to use pgBouncer, you need to disable prepared statements:
# PREPARED_STATEMENTS=false
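# Sketch of a pgBouncer pairing (host/port are invented; 6432 is pgBouncer's usual port):
#   DB_HOST=127.0.0.1
#   DB_PORT=6432
#   PREPARED_STATEMENTS=false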
# Cluster number setting for the streaming API server.
# If you comment out the following line, the cluster number will be `numOfCpuCores - 1`.
# STREAMING_CLUSTER_NUM=1
# Docker mastodon user
# If you use Docker, you may want to assign UID/GID manually.
# UID=1000
# GID=1000
# LDAP authentication (optional)
# LDAP_ENABLED=true
# LDAP_HOST=localhost
# LDAP_PORT=389
# LDAP_METHOD=simple_tls
# LDAP_BASE=
# LDAP_BIND_DN=
# LDAP_PASSWORD=
# LDAP_UID=cn
# LDAP_MAIL=mail
# LDAP_SEARCH_FILTER=(|(%{uid}=%{email})(%{mail}=%{email}))
# LDAP_UID_CONVERSION_ENABLED=true
# LDAP_UID_CONVERSION_SEARCH=., -
# LDAP_UID_CONVERSION_REPLACE=_
# PAM authentication (optional)
# PAM authentication uses the "email" pam variable for e-mail generation,
# with PAM_DEFAULT_SUFFIX as an optional fallback.
# The pam environment variable "email" is provided by:
# https://github.com/devkral/pam_email_extractor
# PAM_ENABLED=true
# Fallback email domain for email address generation (LOCAL_DOMAIN by default)
# PAM_EMAIL_DOMAIN=example.com
# Name of the pam service (pam "auth" section is evaluated)
# PAM_DEFAULT_SERVICE=rpam
# Name of the pam service used for checking whether a user can register (pam "account" section is evaluated) (nil (disabled) by default)
# PAM_CONTROLLED_SERVICE=rpam
# Optional CAS authentication (cf. omniauth-cas):
# CAS_ENABLED=true
# CAS_URL=https://sso.myserver.com/
# CAS_HOST=sso.myserver.com
# CAS_PORT=443
# CAS_SSL=true
# CAS_VALIDATE_URL=
# CAS_CALLBACK_URL=
# CAS_LOGOUT_URL=
# CAS_LOGIN_URL=
# CAS_UID_FIELD='user'
# CAS_CA_PATH=
# CAS_DISABLE_SSL_VERIFICATION=false
# CAS_UID_KEY='user'
# CAS_NAME_KEY='name'
# CAS_EMAIL_KEY='email'
# CAS_NICKNAME_KEY='nickname'
# CAS_FIRST_NAME_KEY='firstname'
# CAS_LAST_NAME_KEY='lastname'
# CAS_LOCATION_KEY='location'
# CAS_IMAGE_KEY='image'
# CAS_PHONE_KEY='phone'
# CAS_SECURITY_ASSUME_EMAIL_IS_VERIFIED=true
# Optional SAML authentication (cf. omniauth-saml)
# SAML_ENABLED=true
# SAML_ACS_URL=http://localhost:3000/auth/auth/saml/callback
# SAML_ISSUER=https://example.com
# SAML_IDP_SSO_TARGET_URL=https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO
# SAML_IDP_CERT=
# SAML_IDP_CERT_FINGERPRINT=
# SAML_NAME_IDENTIFIER_FORMAT=
# SAML_CERT=
# SAML_PRIVATE_KEY=
# SAML_SECURITY_WANT_ASSERTION_SIGNED=true
# SAML_SECURITY_WANT_ASSERTION_ENCRYPTED=true
# SAML_SECURITY_ASSUME_EMAIL_IS_VERIFIED=true
# SAML_ATTRIBUTES_STATEMENTS_UID="urn:oid:0.9.2342.19200300.100.1.1"
# SAML_ATTRIBUTES_STATEMENTS_EMAIL="urn:oid:1.3.6.1.4.1.5923.1.1.1.6"
# SAML_ATTRIBUTES_STATEMENTS_FULL_NAME="urn:oid:2.16.840.1.113730.3.1.241"
# SAML_ATTRIBUTES_STATEMENTS_FIRST_NAME="urn:oid:2.5.4.42"
# SAML_ATTRIBUTES_STATEMENTS_LAST_NAME="urn:oid:2.5.4.4"
# SAML_UID_ATTRIBUTE="urn:oid:0.9.2342.19200300.100.1.1"
# SAML_ATTRIBUTES_STATEMENTS_VERIFIED=
# SAML_ATTRIBUTES_STATEMENTS_VERIFIED_EMAIL=
# Use an HTTP proxy for outgoing requests (optional)
# http_proxy=http://gateway.local:8118
# Access control for hidden service.
# ALLOW_ACCESS_TO_HIDDEN_SERVICE=true

boxfile.yml
@@ -1,205 +0,0 @@
run.config:
  engine: ruby
  engine.config:
    runtime: ruby-2.5
  extra_packages:
    # basic servers:
    - nginx
    - nodejs
    # for images:
    - ImageMagick
    - jemalloc
    # for videos:
    - ffmpeg3
    # to prep the .env file:
    - gettext-tools
    # for node-gyp, used in the asset compilation process:
    - python-2
    # i18n:
    - libidn
  cache_dirs:
    - node_modules
  extra_path_dirs:
    - node_modules/.bin
  build_triggers:
    - .ruby-version
    - Gemfile
    - Gemfile.lock
    - package.json
    - yarn.lock
  extra_steps:
    - cp .env.nanobox .env
    - yarn
  fs_watch: true

deploy.config:
  extra_steps:
    - NODE_ENV=production bundle exec rake assets:precompile
  transform:
    - 'envsubst < /app/.env.nanobox > /app/.env.production'
    - |-
      if [ -z "$LOCAL_DOMAIN" ]
      then
        . /app/.env.production
        export LOCAL_DOMAIN
      fi
      erb /app/nanobox/nginx-web.conf.erb > /app/nanobox/nginx-web.conf
      erb /app/nanobox/nginx-stream.conf.erb > /app/nanobox/nginx-stream.conf
    - touch /app/log/production.log
  before_live:
    web.web:
      - bin/tootctl cache clear
      - bundle exec rake db:migrate:setup
  after_live:
    worker.sidekiq:
      - |-
        if [[ "${ES_ENABLED}" != "false" ]]
        then
          bin/tootctl search deploy
        fi

web.web:
  start:
    nginx: nginx -c /app/nanobox/nginx-web.conf
    rails: bundle exec puma -C /app/config/puma.rb
  routes:
    - '/'
  writable_dirs:
    - tmp
  log_watch:
    rails: 'log/production.log'
  network_dirs:
    data.storage:
      - public/system

web.stream:
  start:
    nginx: nginx -c /app/nanobox/nginx-stream.conf
    node: yarn run start
  routes:
    - '/api/v1/streaming*'
    # Somehow we're getting requests for scheme://domain//api/v1/streaming* - match those, too
    - '//api/v1/streaming*'
  writable_dirs:
    - tmp

worker.sidekiq:
  start:
    default: bundle exec sidekiq -c 5 -q default -L /app/log/sidekiq.log
    mailers: bundle exec sidekiq -c 5 -q mailers -L /app/log/sidekiq.log
    pull: bundle exec sidekiq -c 5 -q pull -L /app/log/sidekiq.log
    push: bundle exec sidekiq -c 5 -q push -L /app/log/sidekiq.log
    scheduler: bundle exec sidekiq -c 5 -q scheduler -L /app/log/sidekiq.log
  writable_dirs:
    - tmp
  log_watch:
    rails: 'log/production.log'
    sidekiq: 'log/sidekiq.log'
  network_dirs:
    data.storage:
      - public/system

data.db:
  image: nanobox/postgresql:9.6
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        PGPASSWORD=${DATA_DB_PASS} pg_dump -U ${DATA_DB_USER} -w -Fc -O gonano |
        gzip |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).sql.gz -X POST -T - >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        sed 's/,/\n/g' |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: \?"\(.*\)".*/\1/' |
        while read file
        do
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done

data.elastic:
  image: nanobox/elasticsearch:5
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        id=$(cat /proc/sys/kernel/random/uuid)
        curl -X PUT -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}" -d "{\"type\": \"fs\",\"settings\": {\"location\": \"/var/tmp/${id}\",\"compress\": true}}"
        curl -X PUT -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}/backup?wait_for_completion=true&pretty"
        tar -cz -C "/var/tmp/${id}" . |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).tgz -X POST -T - >&2
        curl -X DELETE -H "Content-Type: application/json" "127.0.0.1:9200/_snapshot/${id}"
        rm -rf "/var/tmp/${id}"
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        sed 's/,/\n/g' |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: \?"\(.*\)".*/\1/' |
        while read file
        do
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done

data.redis:
  image: nanobox/redis:4.0
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).rdb -X POST -T /data/var/db/redis/dump.rdb >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        sed 's/,/\n/g' |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: \?"\(.*\)".*/\1/' |
        while read file
        do
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done

data.storage:
  image: nanobox/unfs:0.9
  cron:
    - id: backup
      schedule: '0 3 * * *'
      command: |
        tar cz -C /data/var/db/unfs/ . |
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/backup-${HOSTNAME}-$(date -u +%Y-%m-%d.%H-%M-%S).tgz -X POST -T - >&2
        curl -k -s -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/ |
        sed 's/,/\n/g' |
        grep ${HOSTNAME} |
        sort |
        head -n-${BACKUP_COUNT:-1} |
        sed 's/.*: \?"\(.*\)".*/\1/' |
        while read file
        do
        curl -k -H "X-AUTH-TOKEN: ${WAREHOUSE_DATA_HOARDER_TOKEN}" https://${WAREHOUSE_DATA_HOARDER_HOST}:7410/blobs/${file} -X DELETE
        done
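A minimal sketch of the transform step above, runnable outside nanobox (the example values are invented; envsubst ships with gettext-tools, which extra_packages pulls in):

    export DATA_REDIS_HOST=10.0.0.5 APP_NAME=myapp   # assumed example values
    envsubst < .env.nanobox > .env.production
    grep -E '^(REDIS_HOST|LOCAL_DOMAIN)=' .env.production
    # REDIS_HOST=10.0.0.5
    # LOCAL_DOMAIN=myapp.nanoapp.io

    # The erb calls render the nginx templates the same way, e.g.:
    echo 'server_name <%= ENV["LOCAL_DOMAIN"] %>;' | LOCAL_DOMAIN=myapp.nanoapp.io erb
    # server_name myapp.nanoapp.io;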

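Every backup cron above shares the same retention pipeline. Its pivot is head's negative count: head -n-K prints all but the last K lines, and because the blob names embed a UTC timestamp, sorting puts the newest backups last, where they drop off the deletion list. A standalone sketch with invented names and BACKUP_COUNT=1:

    printf 'backup-db-2022-08-01\nbackup-db-2022-08-02\nbackup-db-2022-08-03\n' |
    sort |
    head -n-1
    # backup-db-2022-08-01
    # backup-db-2022-08-02   <- printed names get DELETEd; the newest one survives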
nanobox/nginx-local.conf
@@ -1,92 +0,0 @@
worker_processes 1;
daemon off;

events {
    worker_connections 1024;
}

http {
    include /data/etc/nginx/mime.types;
    sendfile on;

    gzip on;
    gzip_disable "MSIE [1-6]\.";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_min_length 500;
    gzip_http_version 1.1;
    gzip_types text/plain text/xml text/javascript text/css text/comma-separated-values application/xml+rss application/xml application/x-javascript application/json application/javascript application/atom+xml;

    # Proxy upstream to the puma process
    upstream rails {
        server 127.0.0.1:3000;
    }

    # Proxy upstream to the node process
    upstream node {
        server 127.0.0.1:4000;
    }

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    # Configuration for Nginx
    server {
        # Listen on port 8080
        listen 8080;

        keepalive_timeout 70;
        client_max_body_size 80M;

        root /app/public;

        add_header Strict-Transport-Security "max-age=31536000";

        location / {
            try_files $uri @rails;
        }

        # Proxy connections to rails
        location @rails {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Proxy "";
            proxy_pass_header Server;

            proxy_pass http://rails;
            proxy_buffering off;
            proxy_redirect off;
            proxy_http_version 1.1;

            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            tcp_nodelay on;
        }

        # Proxy connections to node
        location /api/v1/streaming {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Proxy "";

            proxy_pass http://node;
            proxy_buffering off;
            proxy_redirect off;
            proxy_http_version 1.1;

            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            tcp_nodelay on;
        }
    }

    error_page 500 501 502 503 504 /500.html;
}
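The map block above implements the usual WebSocket pass-through: when the client sends an Upgrade header, the backend sees Connection: upgrade, otherwise Connection: close. A quick smoke test of the streaming route this config proxies (local sketch; /api/v1/streaming/health is the streaming server's health endpoint):

    curl -s http://127.0.0.1:8080/api/v1/streaming/health
    # OK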

nanobox/nginx-stream.conf.erb
@@ -1,66 +0,0 @@
worker_processes 1;
daemon off;

events {
    worker_connections 1024;
}

http {
    include /data/etc/nginx/mime.types;
    sendfile on;

    gzip on;
    gzip_disable "MSIE [1-6]\.";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_min_length 500;
    gzip_http_version 1.1;
    gzip_types text/plain text/xml text/javascript text/css text/comma-separated-values application/xml+rss application/xml application/x-javascript application/json application/javascript application/atom+xml;

    # Proxy upstream to the node process
    upstream node {
        server 127.0.0.1:4000;
    }

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    # Configuration for Nginx
    server {
        # Listen on port 8080
        listen 8080;

        keepalive_timeout 70;
        client_max_body_size 80M;

        root /app/public;

        add_header Strict-Transport-Security "max-age=31536000";

        location / {
            try_files $uri @node;
        }

        # Proxy connections to node
        location @node {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Proxy "";

            proxy_pass http://node;
            proxy_buffering off;
            proxy_redirect off;
            proxy_http_version 1.1;

            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            tcp_nodelay on;
        }
    }
}
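This variant fronts only the node streaming process, so every request falls through to @node. A sketch of a handshake that exercises the Upgrade/Connection headers the map above keys on (token is invented; expect HTTP/1.1 101 Switching Protocols on success):

    curl -si http://127.0.0.1:8080/api/v1/streaming/user \
      -H 'Connection: Upgrade' -H 'Upgrade: websocket' \
      -H 'Sec-WebSocket-Version: 13' \
      -H 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==' \
      -H 'Authorization: Bearer YOUR_ACCESS_TOKEN'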

nanobox/nginx-web.conf.erb
@@ -1,90 +0,0 @@
worker_processes 1;
daemon off;

events {
    worker_connections 1024;
}

http {
    include /data/etc/nginx/mime.types;
    sendfile on;

    gzip on;
    gzip_disable "MSIE [1-6]\.";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_min_length 500;
    gzip_http_version 1.1;
    gzip_types text/plain text/xml text/javascript text/css text/comma-separated-values application/xml+rss application/xml application/x-javascript application/json application/javascript application/atom+xml;

    # Proxy upstream to the puma process
    upstream rails {
        server 127.0.0.1:3000;
    }

    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    # Configuration for Nginx
    server {
        # Listen on port 8080
        listen 8080;

        keepalive_timeout 70;
        client_max_body_size 80M;

        root /app/public;

        add_header Strict-Transport-Security "max-age=31536000";

        location / {
            try_files $uri @rails;
        }

        location /sw.js {
            add_header Cache-Control "public, max-age=0";
            add_header Strict-Transport-Security "max-age=31536000";
            try_files $uri @rails;
        }

        location ~ ^/(emoji|packs|system/media_attachments/files|system/accounts/avatars) {
            add_header Cache-Control "public, max-age=31536000, immutable";
            add_header Strict-Transport-Security "max-age=31536000";
            try_files $uri @rails;
        }

        # Proxy connections to rails
        location @rails {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto https;
            proxy_set_header Proxy "";
            proxy_pass_header Server;

            proxy_pass http://rails;
            proxy_buffering on;
            proxy_redirect off;
            proxy_http_version 1.1;

            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            proxy_cache CACHE;
            proxy_cache_valid 200 7d;
            proxy_cache_valid 410 24h;
            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
            add_header Strict-Transport-Security "max-age=31536000";
            add_header X-Cached $upstream_cache_status;

            tcp_nodelay on;
        }
    }

    proxy_cache_path /data/var/cache/nginx levels=1:2 keys_zone=CACHE:10m inactive=7d max_size=1g;
    error_page 500 501 502 503 504 /500.html;
}
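Unlike the other two configs, @rails here caches upstream responses, and the X-Cached header echoes $upstream_cache_status, so the cache is easy to observe. A sketch (local setup is assumed; whether a repeat request is a HIT also depends on the Cache-Control headers the app sends):

    curl -sI http://127.0.0.1:8080/ | grep -i '^x-cached'   # X-Cached: MISS
    curl -sI http://127.0.0.1:8080/ | grep -i '^x-cached'   # X-Cached: HIT once cached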