# Backup configuration for the CONFLUENCE/Mesh backup and restore scripts.
# This file is sourced as a Bash script by the backup tooling.
# Name used to identify the CONFLUENCE/Mesh instance being backed up.
# This appears in archive names and AWS snapshot tags.
# It should not contain spaces and must be under 100 characters long.
INSTANCE_NAME=confluence

# Type of instance being backed up:
# - <leave blank> or CONFLUENCE-dc - The instance being backed up is a CONFLUENCE DC instance.
# - CONFLUENCE-mesh                - The instance being backed up is a CONFLUENCE Mesh instance.
# NOTE(review): the option names above differ in case from the value used below —
# confirm which casing the consuming scripts compare against.
INSTANCE_TYPE=confluence-dc

# Owner and group of ${CONFLUENCE_HOME}:
CONFLUENCE_UID=confluence
CONFLUENCE_GID=confluence

# Strategy for backing up the CONFLUENCE/Mesh home directory and data stores (if configured):
# - amazon-ebs - Amazon EBS snapshots of the volumes containing data for CONFLUENCE Server/Mesh
# - rsync      - "rsync" of the disk contents to a temporary location. NOTE: This can NOT be used
#                with BACKUP_ZERO_DOWNTIME=true.
# - lvm        - LVM snapshot of the home volume (see the "lvm" sub-options below).
#                NOTE(review): this value was undocumented here although it is the configured
#                value and is handled by the case statement below — confirm it is supported.
# - zfs        - ZFS snapshot strategy for disk backups.
# - none       - Do not attempt to backup the home directory or data stores.
# Note: this config var was previously named BACKUP_HOME_TYPE
BACKUP_DISK_TYPE=lvm

# Strategy for backing up the database:
# - amazon-rds         - Amazon RDS snapshots
# - mysql              - MySQL using "mysqldump" to backup and "mysql" to restore
# - postgresql         - PostgreSQL using "pg_dump" to backup and "pg_restore" to restore
# - postgresql-fslevel - PostgreSQL with data directory located in the file system volume as home
#                        directory (so that it will be included implicitly in the home volume snapshot)
# - none               - Do not attempt to backup the database.
#
# Note: This property is ignored while backing up Mesh nodes.
BACKUP_DATABASE_TYPE=postgresql

# Strategy for archiving backups and/or copying them to an offsite location:
# - <leave blank> - Do not use an archiving strategy
# - aws-snapshots - AWS EBS and/or RDS snapshots, with optional copy to another region
# - gpg-zip       - "gpg-zip" archive
# - tar           - Unix "tar" archive
BACKUP_ARCHIVE_TYPE=tar

# Strategy for CONFLUENCE/Mesh disk disaster recovery:
# - zfs  - ZFS snapshot strategy for disk replication.
# - none - Do not attempt to replicate data on disk.
STANDBY_DISK_TYPE=none

# Strategy for replicating the database:
# - amazon-rds - Amazon RDS Read replica
# - postgresql - PostgreSQL replication
# - none       - Do not attempt to replicate the database.
#
# Note: This property is ignored while backing up Mesh nodes.
STANDBY_DATABASE_TYPE=none

# If BACKUP_ZERO_DOWNTIME is set to true, data on disk and the database will be backed up WITHOUT
# locking CONFLUENCE in maintenance mode. NOTE: This can NOT be used with CONFLUENCE Server versions
# older than 4.8. For more information, see
# https://confluence.atlassian.com/display/CONFLUENCEServer/Using+CONFLUENCE+Zero+Downtime+Backup.
# Make sure you read and understand this document before uncommenting this variable.
#BACKUP_ZERO_DOWNTIME=true
# Sub-options for each disk backup strategy
case ${BACKUP_DISK_TYPE} in
    rsync)
        # The path to the CONFLUENCE/Mesh home directory (with trailing /)
        CONFLUENCE_HOME=/var/atlassian/application-data/CONFLUENCE/

        # Paths to all configured data stores (with trailing /)
        # Only required if one or more data stores is attached to the instance.
        CONFLUENCE_DATA_STORES=()

        # Optional list of repo IDs which should be excluded from the backup. For example: (2 5 88)
        # Note: This property is ignored while backing up Mesh nodes.
        CONFLUENCE_BACKUP_EXCLUDE_REPOS=()
        ;;
    lvm)
        # The path to the CONFLUENCE home directory (with trailing /)
        # NOTE(review): unlike the rsync branch, this value has NO trailing slash —
        # confirm the consuming scripts accept it.
        CONFLUENCE_HOME=/data2/confluence

        # Backup destination used by the lvm strategy.
        # NOTE(review): the BACKUP_ARCHIVE_TYPE case later in this file unconditionally
        # reassigns CONFLUENCE_BACKUP_HOME — verify which value is intended to win.
        CONFLUENCE_BACKUP_HOME=/backup/confluence

        # Paths to all configured data stores (with trailing /)
        # Only required if one or more data stores is attached to the instance.
        CONFLUENCE_DATA_STORES=()

        # Optional list of repo IDs which should be excluded from the backup. For example: (2 5 88)
        CONFLUENCE_BACKUP_EXCLUDE_REPOS=()
        ;;
    zfs)
        # The name of each filesystem that holds file server data for CONFLUENCE Server/Mesh.
        # This should, at a minimum, include the home directory filesystem, and if configured,
        # the filesystems for each data store.
        # This must be the same name(s) on the standby if using replication.
        # Note: this config var should contain the value previously in ZFS_HOME_TANK_NAME
        ZFS_FILESYSTEM_NAMES=(tank/atlassian-home)

        # ==== DISASTER RECOVERY VARS ====

        # The name of the ZFS filesystem containing the shared home directory. This is needed for
        # disaster recovery so that the home directory can be promoted.
        ZFS_HOME_FILESYSTEM=

        # The user for SSH when running replication commands on the standby file server.
        # Note this user needs password-less sudo on the standby to run zfs commands and
        # password-less ssh from the primary file server to the standby file server.
        STANDBY_SSH_USER=

        # (Optional) Append flags to the SSH commands. e.g. "-i private_key.pem"
        # Useful flags for unattended ssh commands: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
        STANDBY_SSH_OPTIONS=

        # The hostname of the standby file server
        STANDBY_SSH_HOST=
        ;;
esac
# Sub-options for each database backup strategy
#
# Note: This property is ignored while backing up Mesh nodes.
case ${BACKUP_DATABASE_TYPE} in
    mysql)
        # Connection details used by "mysqldump" (backup) and "mysql" (restore).
        CONFLUENCE_DB=CONFLUENCE
        MYSQL_HOST=
        MYSQL_USERNAME=
        MYSQL_PASSWORD=
        MYSQL_BACKUP_OPTIONS=
        ;;
    mssql)
        # NOTE(review): "mssql" is handled here but is not listed among the documented
        # BACKUP_DATABASE_TYPE options above — confirm it is actually supported.
        CONFLUENCE_DB=CONFLUENCE
        ;;
    postgresql)
        # The connection details for your primary instance's PostgreSQL database. The pg_hba.conf
        # file must be configured to allow the backup and restore scripts full access as
        # POSTGRES_USERNAME with the specified PGPASSWORD. When Disaster Recovery is used,
        # POSTGRES_HOST must also be accessible from the standby system with the same level of access.
        CONFLUENCE_DB=confluence
        POSTGRES_HOST=localhost
        POSTGRES_USERNAME=database1user
        # SECURITY(review): plaintext database password committed in this file — restrict the
        # file's permissions, or supply the password via a secrets store or ~/.pgpass instead.
        export PGPASSWORD=database1password
        POSTGRES_PORT=5432

        # ==== DISASTER RECOVERY VARS ====

        # The full path to the standby server's PostgreSQL data folder. i.e "/var/lib/pgsql94/data"
        # Note: Attempt auto-detection based on major version (Works with CentOS, RHEL and
        # Amazon Linux, override if unsure)
        # NOTE(review): ${psql_major} is never assigned in this file — presumably exported by
        # the script that sources this config; confirm, otherwise these paths collapse to
        # "/var/lib/pgsql/data" and "postgresql".
        STANDBY_DATABASE_DATA_DIR="/var/lib/pgsql${psql_major}/data"

        # The user which runs the PostgreSQL system service. This is normally "postgres"
        STANDBY_DATABASE_SERVICE_USER=postgres

        # The name of the replication slot
        STANDBY_DATABASE_REPLICATION_SLOT_NAME=CONFLUENCE

        # The username and password of the user that will be used to execute the replication.
        STANDBY_DATABASE_REPLICATION_USER_USERNAME=
        STANDBY_DATABASE_REPLICATION_USER_PASSWORD=

        # The postgres service name for stopping / starting it.
        # Note: Attempt auto-detection based on major version (Works with CentOS, RHEL and
        # Amazon Linux, override if unsure)
        STANDBY_DATABASE_SERVICE_NAME="postgresql${psql_major}"
        ;;
    postgresql-fslevel)
        # The postgres service name for stopping / starting it at restore time.
        POSTGRESQL_SERVICE_NAME="postgresql${psql_major}"
        ;;
esac
# Sub-options for each archive strategy.
# NOTE(review): the lone "*)" pattern matches EVERY BACKUP_ARCHIVE_TYPE value (including blank),
# so these variables are always assigned — confirm that is intentional.
case ${BACKUP_ARCHIVE_TYPE} in
    *)
        # The path to working folder for the backup.
        # NOTE(review): left blank, the derived paths below resolve to "/CONFLUENCE-db/" etc.
        # at the filesystem root — set this before running a backup.
        CONFLUENCE_BACKUP_ROOT=
        CONFLUENCE_BACKUP_DB=${CONFLUENCE_BACKUP_ROOT}/CONFLUENCE-db/
        CONFLUENCE_BACKUP_HOME=${CONFLUENCE_BACKUP_ROOT}/CONFLUENCE-home/
        CONFLUENCE_BACKUP_DATA_STORES=${CONFLUENCE_BACKUP_ROOT}/CONFLUENCE-data-stores/

        # The path to where the backup archives are stored
        CONFLUENCE_BACKUP_ARCHIVE_ROOT=

        # Options for the gpg-zip archive type
        CONFLUENCE_BACKUP_GPG_RECIPIENT=
        ;;
esac
# Options to pass to every "curl" command:
#   -L follow redirects, -s silent (no progress meter), -f fail with a non-zero exit
#   status on HTTP errors instead of printing the error page.
CURL_OPTIONS="-L -s -f"
# === AWS Variables ===
# Populated only when an AWS-based disk or database backup strategy is selected.
# (Uses "[ ... ] || [ ... ]" instead of the obsolescent "[ ... -o ... ]" form.)
if [ "amazon-ebs" = "${BACKUP_DISK_TYPE}" ] || [ "amazon-rds" = "${BACKUP_DATABASE_TYPE}" ]; then

    # Instance identity document fetched from the EC2 instance metadata service.
    # NOTE(review): this is an unauthenticated IMDSv1 request; instances that enforce IMDSv2
    # require a session token — confirm the instance's metadata configuration.
    AWS_INFO=$(curl ${CURL_OPTIONS} http://169.254.169.254/latest/dynamic/instance-identity/document)

    # The AWS account ID of the instance. Used to create Amazon Resource Names (ARNs)
    AWS_ACCOUNT_ID=$(echo "${AWS_INFO}" | jq -r .accountId)

    # The availability zone in which volumes will be created when restoring an instance.
    AWS_AVAILABILITY_ZONE=$(echo "${AWS_INFO}" | jq -r .availabilityZone)

    # The region for the resources CONFLUENCE is using (volumes, instances, snapshots, etc)
    AWS_REGION=$(echo "${AWS_INFO}" | jq -r .region)

    # The EC2 instance ID
    AWS_EC2_INSTANCE_ID=$(echo "${AWS_INFO}" | jq -r .instanceId)

    # Additional AWS tags for EBS and RDS snapshots; tags need to be in JSON format without
    # enclosing square brackets.
    # Example: AWS_ADDITIONAL_TAGS='{"Key":"example_key", "Value":"example_value"}'
    AWS_ADDITIONAL_TAGS=

    # Ensure we fsfreeze while snapshots of ebs volumes are taken
    FSFREEZE=true
fi
# Used by the scripts for verbose logging. If not true only errors will be shown.
# Respects a value already set in the environment; defaults to true.
CONFLUENCE_VERBOSE_BACKUP=${CONFLUENCE_VERBOSE_BACKUP:-true}

# HipChat notification options.
# NOTE(review): Atlassian discontinued the HipChat service — confirm these settings are
# still consumed by the scripts before relying on them.
HIPCHAT_URL=https://api.hipchat.com
HIPCHAT_ROOM=
HIPCHAT_TOKEN=

# The number of backups to retain. After backups are taken, all old snapshots except for the
# most recent ${KEEP_BACKUPS} are deleted. Set to 0 to disable cleanup of old snapshots.
# This is also used by Disaster Recovery to limit snapshots.
KEEP_BACKUPS=0
# ==== Elasticsearch VARS ====

# The CONFLUENCE search index (default is CONFLUENCE-search-v1). Most users will NOT need to change this.
ELASTICSEARCH_INDEX_NAME=CONFLUENCE-search-v1

# The hostname (and port, if required) for the Elasticsearch instance
ELASTICSEARCH_HOST=localhost:7992
ELASTICSEARCH_REPOSITORY_NAME=CONFLUENCE-snapshots

# Sub-options for each Elasticsearch snapshot strategy.
# NOTE(review): BACKUP_ELASTICSEARCH_TYPE is referenced here but never assigned in this file —
# presumably supplied by the sourcing script or environment; confirm.
case ${BACKUP_ELASTICSEARCH_TYPE} in
    amazon-es)
        # Configuration for the Amazon Elasticsearch Service
        ELASTICSEARCH_S3_BUCKET=
        ELASTICSEARCH_S3_BUCKET_REGION=us-east-1
        # The IAM role that can be used to snapshot AWS Elasticsearch, used to configure
        # the S3 snapshot repository
        ELASTICSEARCH_SNAPSHOT_IAM_ROLE=
        ;;
    s3)
        # Configuration for the Amazon S3 snapshot repository (s3)
        ELASTICSEARCH_S3_BUCKET=
        ELASTICSEARCH_S3_BUCKET_REGION=us-east-1
        # Elasticsearch credentials
        ELASTICSEARCH_USERNAME=
        ELASTICSEARCH_PASSWORD=
        ;;
    fs)
        # Configuration for the shared filesystem snapshot repository (fs)
        ELASTICSEARCH_REPOSITORY_LOCATION=
        # Elasticsearch credentials
        ELASTICSEARCH_USERNAME=
        ELASTICSEARCH_PASSWORD=
        ;;
esac
# ==== DISASTER RECOVERY VARS ====

# Only used on a CONFLUENCE Data Center primary instance which has been configured with a
# Disaster Recovery standby system. See
# https://confluence.atlassian.com/display/CONFLUENCEServer/Disaster+recovery+guide+for+CONFLUENCE+Data+Center
# for more information.
#
# The JDBC URL for the STANDBY database server.
# WARNING: It is imperative that you set this to the correct JDBC URL for your STANDBY database.
# During fail-over, 'promote-home.sh' will write this to your 'CONFLUENCE.properties' file so that
# your standby CONFLUENCE instance will connect to the right database. If this is incorrect, then
# in a fail-over scenario your standby CONFLUENCE instance may fail to start or even connect to the
# incorrect database.
#
# Example for PostgreSQL:
#   "jdbc:postgresql://standby-db.my-company.com:${POSTGRES_PORT}/${CONFLUENCE_DB}"
# Example for PostgreSQL running in Amazon RDS:
#   jdbc:postgresql://${RDS_ENDPOINT}/${CONFLUENCE_DB}
# (Corrected from "jdbc:postgres://" — the PostgreSQL JDBC driver only accepts the
# "jdbc:postgresql://" scheme.)
#
# Note: This property is ignored while backing up Mesh nodes.
STANDBY_JDBC_URL=