mirror of https://github.com/OpenVidu/openvidu.git

Add openvidu-deployment scripts
parent c1f2971881
commit 9e04d59b61

@@ -0,0 +1,944 @@
AWSTemplateFormatVersion: 2010-09-09
Description: OpenVidu Community - Single Node

Parameters:

  CertificateType:
    Description: |
      [selfsigned] Not recommended for production use. If you don't have a FQDN (DomainName parameter), you can use this option to generate a self-signed certificate.
      [owncert] Valid for production environments. If you have a FQDN (DomainName parameter)
      and an Elastic IP, you can use this option to use your own certificate.
      [letsencrypt] Valid for production environments. If you have a FQDN (DomainName parameter)
      and an Elastic IP, you can use this option to generate a Let's Encrypt certificate.
    Type: String
    AllowedValues:
      - selfsigned
      - owncert
      - letsencrypt
    Default: selfsigned

  PublicElasticIP:
    Type: String
    Description: Previously created Elastic IP for the OpenVidu Deployment.
    AllowedPattern: ^$|^([01]?\d{1,2}|2[0-4]\d|25[0-5])\.([01]?\d{1,2}|2[0-4]\d|25[0-5])\.([01]?\d{1,2}|2[0-4]\d|25[0-5])\.([01]?\d{1,2}|2[0-4]\d|25[0-5])$
    ConstraintDescription: The Public Elastic IP does not have a valid IPv4 format

  DomainName:
    Type: String
    Description: Domain name for the OpenVidu Deployment.
    AllowedPattern: ^$|^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$
    ConstraintDescription: The domain name does not have a valid domain name format

  OwnPublicCertificate:
    Description: "If certificate type is 'owncert', this parameter will be used to specify the public certificate"
    Type: String

  OwnPrivateCertificate:
    Description: "If certificate type is 'owncert', this parameter will be used to specify the private certificate"
    Type: String

  LetsEncryptEmail:
    Description: "If certificate type is 'letsencrypt', this email will be used for Let's Encrypt notifications"
    Type: String

  TurnDomainName:
    Description: '(Optional) Domain name for the TURN server with TLS. Only needed if your users are behind restrictive firewalls'
    Type: String
    Default: ''

  TurnOwnPublicCertificate:
    Description: "(Optional) This setting is applicable if the certificate type is set to 'owncert' and the TurnDomainName is specified."
    Type: String
    Default: ''

  TurnOwnPrivateCertificate:
    Description: "(Optional) This setting is applicable if the certificate type is set to 'owncert' and the TurnDomainName is specified."
    Type: String
    Default: ''

  # EC2 Instance configuration
  InstanceType:
    Description: "Specifies the EC2 instance type for your OpenVidu instance"
    Type: String
    Default: c6a.xlarge
    AllowedValues:
      - t2.large
      - t2.xlarge
      - t2.2xlarge
      - t3.medium
      - t3.large
      - t3.xlarge
      - t3.2xlarge
      - m4.large
      - m4.xlarge
      - m4.2xlarge
      - m4.4xlarge
      - m4.10xlarge
      - m4.16xlarge
      - m5.large
      - m5.xlarge
      - m5.2xlarge
      - m5.4xlarge
      - m5.8xlarge
      - m5.12xlarge
      - m5.16xlarge
      - m5.24xlarge
      - m6i.large
      - m6i.xlarge
      - m6i.2xlarge
      - m6i.4xlarge
      - m6i.8xlarge
      - m6i.12xlarge
      - m6i.16xlarge
      - m6i.24xlarge
      - m6i.32xlarge
      - m6i.metal
      - c4.large
      - c4.xlarge
      - c4.2xlarge
      - c4.4xlarge
      - c4.8xlarge
      - c5.large
      - c5.xlarge
      - c5.2xlarge
      - c5.4xlarge
      - c5.9xlarge
      - c5.12xlarge
      - c5.18xlarge
      - c5.24xlarge
      - c6a.large
      - c6a.xlarge
      - c6a.2xlarge
      - c6a.4xlarge
      - c6a.8xlarge
      - c6a.12xlarge
      - c6a.16xlarge
      - c6a.24xlarge
      - c6a.32xlarge
      - c6a.48xlarge
      - c6a.metal
      - c6i.large
      - c6i.xlarge
      - c6i.2xlarge
      - c6i.4xlarge
      - c6i.8xlarge
      - c6i.12xlarge
      - c6i.16xlarge
      - c6i.24xlarge
      - c6i.32xlarge
      - c6i.metal
      - c7a.medium
      - c7a.large
      - c7a.xlarge
      - c7a.2xlarge
      - c7a.4xlarge
      - c7a.8xlarge
      - c7a.12xlarge
      - c7a.16xlarge
      - c7a.24xlarge
      - c7a.32xlarge
      - c7a.48xlarge
      - c7a.metal-48xl
      - c7i.large
      - c7i.xlarge
      - c7i.2xlarge
      - c7i.4xlarge
      - c7i.8xlarge
      - c7i.12xlarge
      - c7i.16xlarge
      - c7i.24xlarge
      - c7i.48xlarge
      - c7i.metal-24xl
      - c7i.metal-48xl
      - c5n.large
      - c5n.xlarge
      - c5n.2xlarge
      - c5n.4xlarge
      - c5n.9xlarge
      - c5n.18xlarge
      - m5n.large
      - m5n.xlarge
      - m5n.2xlarge
      - m5n.4xlarge
      - m5n.8xlarge
      - m5n.12xlarge
      - m5n.16xlarge
      - m5n.24xlarge
      - m6in.large
      - m6in.xlarge
      - m6in.2xlarge
      - m6in.4xlarge
      - m6in.8xlarge
      - m6in.12xlarge
      - m6in.16xlarge
      - m6in.24xlarge
      - m6in.32xlarge
      - r5n.large
      - r5n.xlarge
      - r5n.2xlarge
      - r5n.4xlarge
      - r5n.8xlarge
      - r5n.12xlarge
      - r5n.16xlarge
      - r5n.24xlarge
    ConstraintDescription: "Must be a valid EC2 instance type"

  KeyName:
    Type: AWS::EC2::KeyPair::KeyName
    Description: Name of an existing EC2 KeyPair to enable SSH access to the Deployment.
    AllowedPattern: ^.+$
    ConstraintDescription: must be the name of an existing EC2 KeyPair.

  AmiId:
    Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>
    Default: /aws/service/canonical/ubuntu/server/jammy/stable/current/amd64/hvm/ebs-gp2/ami-id
    Description: AMI ID for the EC2 instances

  S3AppDataBucketName:
    Type: String
    Description: Name of the S3 bucket to store data and recordings. If empty, a bucket will be created

Metadata:
  'AWS::CloudFormation::Interface':
    ParameterGroups:
      - Label:
          default: Domain and SSL certificate configuration
        Parameters:
          - CertificateType
          - PublicElasticIP
          - DomainName
          - OwnPublicCertificate
          - OwnPrivateCertificate
          - LetsEncryptEmail
      - Label:
          default: EC2 Instance configuration
        Parameters:
          - InstanceType
          - KeyName
          - AmiId
      - Label:
          default: S3 bucket for application data and recordings
        Parameters:
          - S3AppDataBucketName
      - Label:
          default: (Optional) TURN server configuration with TLS
        Parameters:
          - TurnDomainName
          - TurnOwnPublicCertificate
          - TurnOwnPrivateCertificate

Conditions:
  PublicElasticIPPresent: !Not [ !Equals [!Ref PublicElasticIP, ""] ]
  CreateRecordingsBucket: !Equals [!Ref S3AppDataBucketName, ""]
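  # Note (added for clarity): PublicElasticIPPresent gates the optional EIP association defined
  # below, while CreateRecordingsBucket creates a dedicated S3 bucket (retained on stack deletion)
  # only when S3AppDataBucketName is left empty, and selects which bucket the instance role may access.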

Resources:

  OpenViduSharedInfo:
    Type: AWS::SecretsManager::Secret
    UpdateReplacePolicy: Retain
    DeletionPolicy: Delete
    Properties:
      Name: !Sub openvidu-${AWS::Region}-${AWS::StackName}
      Description: Secret for OpenVidu to store deployment info and seed secrets
      SecretString: |
        {
          "DOMAIN_NAME": "none",
          "LIVEKIT_TURN_DOMAIN_NAME": "none",
          "LETSENCRYPT_EMAIL": "none",
          "REDIS_PASSWORD": "none",
          "MONGO_ADMIN_USERNAME": "none",
          "MONGO_ADMIN_PASSWORD": "none",
          "MONGO_REPLICA_SET_KEY": "none",
          "MINIO_URL": "none",
          "MINIO_ACCESS_KEY": "none",
          "MINIO_SECRET_KEY": "none",
          "DASHBOARD_URL": "none",
          "DASHBOARD_ADMIN_USERNAME": "none",
          "DASHBOARD_ADMIN_PASSWORD": "none",
          "GRAFANA_URL": "none",
          "GRAFANA_ADMIN_USERNAME": "none",
          "GRAFANA_ADMIN_PASSWORD": "none",
          "LIVEKIT_API_KEY": "none",
          "LIVEKIT_API_SECRET": "none",
          "DEFAULT_APP_USERNAME": "none",
          "DEFAULT_APP_PASSWORD": "none",
          "DEFAULT_APP_ADMIN_USERNAME": "none",
          "DEFAULT_APP_ADMIN_PASSWORD": "none",
          "ENABLED_MODULES": "none"
        }
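
  # Note (added for illustration, not part of the original template): once the deployment finishes,
  # every service URL and credential can be read back from this secret, e.g.:
  #   aws secretsmanager get-secret-value \
  #     --region <region> --secret-id openvidu-<region>-<stack-name> \
  #     --query SecretString --output text | jq .
  # This is the same command the instance scripts below use to read and update these values.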

  S3AppDataBucketResource:
    Type: 'AWS::S3::Bucket'
    Properties:
      ### Unique bucket name using Stack ID
      BucketName: !Join ["-" , [ 'openvidu-appdata', !Select [0, !Split ["-", !Select [2, !Split [/, !Ref AWS::StackId ]]]]]]
      AccessControl: Private
      PublicAccessBlockConfiguration:
        BlockPublicAcls: true
        BlockPublicPolicy: true
        IgnorePublicAcls: true
        RestrictPublicBuckets: true
    DeletionPolicy: Retain
    UpdateReplacePolicy: Retain
    Condition: CreateRecordingsBucket

  OpenViduServerRole:
    Type: 'AWS::IAM::Role'
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - ec2.amazonaws.com
            Action:
              - 'sts:AssumeRole'
      Path: /
      Policies:
        - PolicyName: !Sub openvidu-policy-${AWS::Region}-${AWS::StackName}
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - secretsmanager:GetSecretValue
                  - secretsmanager:UpdateSecret
                Resource: !Ref OpenViduSharedInfo
              - Fn::If:
                  - CreateRecordingsBucket
                  - Effect: Allow
                    Action:
                      - s3:DeleteObject
                      - s3:GetObject
                      - s3:PutObject
                    Resource: !Sub ${S3AppDataBucketResource.Arn}/*
                  - Effect: Allow
                    Action:
                      - s3:DeleteObject
                      - s3:GetObject
                      - s3:PutObject
                    Resource: !Sub arn:${AWS::Partition}:s3:::${S3AppDataBucketName}/*
              - Fn::If:
                  - CreateRecordingsBucket
                  - Effect: Allow
                    Action:
                      - s3:ListBucket
                      - s3:GetBucketLocation
                    Resource: !GetAtt S3AppDataBucketResource.Arn
                  - Effect: Allow
                    Action:
                      - s3:ListBucket
                      - s3:GetBucketLocation
                    Resource: !Sub arn:${AWS::Partition}:s3:::${S3AppDataBucketName}
      RoleName:
        Fn::Join:
          # Generate a not too long and unique role name
          # Getting a unique identifier from the stack id
          - ''
          - - openvidu-role-
            - !Select [4, !Split ['-', !Select [2, !Split ['/', !Ref AWS::StackId]]]]

  OpenViduServerInstanceProfile:
    Type: 'AWS::IAM::InstanceProfile'
    Properties:
      Roles:
        - !Ref OpenViduServerRole
      InstanceProfileName: !Sub openvidu-instance-profile-${AWS::Region}-${AWS::StackName}

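  # Note (added for clarity): the instance below is bootstrapped in two stages. cfn-init (invoked
  # from UserData) writes the helper scripts declared under AWS::CloudFormation::Init to
  # /usr/local/bin, and UserData then runs install.sh, config_s3.sh and after_install.sh before
  # signalling the WaitCondition, so stack creation only succeeds if the install worked.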
  OpenviduServer:
    Type: 'AWS::EC2::Instance'
    Metadata:
      Comment: 'Install and configure OpenVidu Community - Single Node'
      AWS::CloudFormation::Init:
        config:
          files:
            '/usr/local/bin/install.sh':
              content: !Sub |
                #!/bin/bash -x
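                # Note (added for clarity): this script is embedded in a CloudFormation !Sub block,
                # so plain ${...} expressions refer to template parameters (e.g. ${DomainName}),
                # while ${!VAR} is CloudFormation's escape for a literal shell ${VAR}. The installed
                # file therefore contains ordinary shell expansions such as ${YQ_VERSION}.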
                OPENVIDU_VERSION=main
                DOMAIN=
                YQ_VERSION=v4.44.5

                apt-get update && apt-get install -y \
                    curl \
                    unzip \
                    jq \
                    wget
                wget https://github.com/mikefarah/yq/releases/download/${!YQ_VERSION}/yq_linux_amd64.tar.gz -O - |\
                    tar xz && mv yq_linux_amd64 /usr/bin/yq

                # Install aws-cli
                curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
                unzip -qq awscliv2.zip
                ./aws/install
                rm -rf awscliv2.zip aws

                # Token for IMDSv2
                TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")

                # Configure Domain
                if [[ "${DomainName}" == '' ]]; then
                  [ ! -d "/usr/share/openvidu" ] && mkdir -p /usr/share/openvidu
                  PublicHostname=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/public-hostname)
                  DOMAIN=$PublicHostname
                  echo $PublicHostname > /usr/share/openvidu/old-host-name
                else
                  DOMAIN=${DomainName}
                fi
                DOMAIN="$(/usr/local/bin/store_secret.sh save DOMAIN_NAME "$DOMAIN")"

                # Store usernames and generate random passwords
                REDIS_PASSWORD="$(/usr/local/bin/store_secret.sh generate REDIS_PASSWORD)"
                MONGO_ADMIN_USERNAME="$(/usr/local/bin/store_secret.sh save MONGO_ADMIN_USERNAME "mongoadmin")"
                MONGO_ADMIN_PASSWORD="$(/usr/local/bin/store_secret.sh generate MONGO_ADMIN_PASSWORD)"
                MONGO_REPLICA_SET_KEY="$(/usr/local/bin/store_secret.sh generate MONGO_REPLICA_SET_KEY)"
                MINIO_ACCESS_KEY="$(/usr/local/bin/store_secret.sh save MINIO_ACCESS_KEY "minioadmin")"
                MINIO_SECRET_KEY="$(/usr/local/bin/store_secret.sh generate MINIO_SECRET_KEY)"
                DASHBOARD_ADMIN_USERNAME="$(/usr/local/bin/store_secret.sh save DASHBOARD_ADMIN_USERNAME "dashboardadmin")"
                DASHBOARD_ADMIN_PASSWORD="$(/usr/local/bin/store_secret.sh generate DASHBOARD_ADMIN_PASSWORD)"
                GRAFANA_ADMIN_USERNAME="$(/usr/local/bin/store_secret.sh save GRAFANA_ADMIN_USERNAME "grafanaadmin")"
                GRAFANA_ADMIN_PASSWORD="$(/usr/local/bin/store_secret.sh generate GRAFANA_ADMIN_PASSWORD)"
                DEFAULT_APP_USERNAME="$(/usr/local/bin/store_secret.sh save DEFAULT_APP_USERNAME "calluser")"
                DEFAULT_APP_PASSWORD="$(/usr/local/bin/store_secret.sh generate DEFAULT_APP_PASSWORD)"
                DEFAULT_APP_ADMIN_USERNAME="$(/usr/local/bin/store_secret.sh save DEFAULT_APP_ADMIN_USERNAME "calladmin")"
                DEFAULT_APP_ADMIN_PASSWORD="$(/usr/local/bin/store_secret.sh generate DEFAULT_APP_ADMIN_PASSWORD)"
                ENABLED_MODULES="$(/usr/local/bin/store_secret.sh save ENABLED_MODULES "observability,app")"
                LIVEKIT_API_KEY="$(/usr/local/bin/store_secret.sh generate LIVEKIT_API_KEY "API" 12)"
                LIVEKIT_API_SECRET="$(/usr/local/bin/store_secret.sh generate LIVEKIT_API_SECRET)"
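                # Note (added for clarity): store_secret.sh (defined later in this template) writes
                # each value into the shared Secrets Manager secret and echoes it back, so the
                # assignments above both persist the credentials and capture them for the installer
                # arguments below.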

                # Base command
                INSTALL_COMMAND="sh <(curl -fsSL http://get.openvidu.io/community/singlenode/$OPENVIDU_VERSION/install.sh)"

                # Common arguments
                COMMON_ARGS=(
                  "--no-tty"
                  "--install"
                  "--environment=aws"
                  "--deployment-type=single_node"
                  "--domain-name=$DOMAIN"
                  "--enabled-modules='$ENABLED_MODULES'"
                  "--redis-password=$REDIS_PASSWORD"
                  "--mongo-admin-user=$MONGO_ADMIN_USERNAME"
                  "--mongo-admin-password=$MONGO_ADMIN_PASSWORD"
                  "--mongo-replica-set-key=$MONGO_REPLICA_SET_KEY"
                  "--minio-access-key=$MINIO_ACCESS_KEY"
                  "--minio-secret-key=$MINIO_SECRET_KEY"
                  "--dashboard-admin-user=$DASHBOARD_ADMIN_USERNAME"
                  "--dashboard-admin-password=$DASHBOARD_ADMIN_PASSWORD"
                  "--grafana-admin-user=$GRAFANA_ADMIN_USERNAME"
                  "--grafana-admin-password=$GRAFANA_ADMIN_PASSWORD"
                  "--default-app-user=$DEFAULT_APP_USERNAME"
                  "--default-app-password=$DEFAULT_APP_PASSWORD"
                  "--default-app-admin-user=$DEFAULT_APP_ADMIN_USERNAME"
                  "--default-app-admin-password=$DEFAULT_APP_ADMIN_PASSWORD"
                  "--livekit-api-key=$LIVEKIT_API_KEY"
                  "--livekit-api-secret=$LIVEKIT_API_SECRET"
                )

                # Turn with TLS
                if [[ "${TurnDomainName}" != '' ]]; then
                  LIVEKIT_TURN_DOMAIN_NAME=$(/usr/local/bin/store_secret.sh save LIVEKIT_TURN_DOMAIN_NAME "${TurnDomainName}")
                  COMMON_ARGS+=(
                    "--turn-domain-name=$LIVEKIT_TURN_DOMAIN_NAME"
                  )
                fi

                # Certificate arguments
                if [[ "${CertificateType}" == "selfsigned" ]]; then
                  CERT_ARGS=(
                    "--certificate-type=selfsigned"
                  )
                elif [[ "${CertificateType}" == "letsencrypt" ]]; then
                  LETSENCRYPT_EMAIL=$(/usr/local/bin/store_secret.sh save LETSENCRYPT_EMAIL "${LetsEncryptEmail}")
                  CERT_ARGS=(
                    "--certificate-type=letsencrypt"
                    "--letsencrypt-email=$LETSENCRYPT_EMAIL"
                  )
                else
                  # Download owncert files
                  mkdir -p /tmp/owncert
                  wget -O /tmp/owncert/fullchain.pem ${OwnPublicCertificate}
                  wget -O /tmp/owncert/privkey.pem ${OwnPrivateCertificate}

                  # Convert to base64
                  OWN_CERT_CRT=$(base64 -w 0 /tmp/owncert/fullchain.pem)
                  OWN_CERT_KEY=$(base64 -w 0 /tmp/owncert/privkey.pem)

                  CERT_ARGS=(
                    "--certificate-type=owncert"
                    "--owncert-public-key=$OWN_CERT_CRT"
                    "--owncert-private-key=$OWN_CERT_KEY"
                  )

                  # Turn with TLS and own certificate
                  if [[ "${TurnDomainName}" != '' ]]; then
                    # Download owncert files
                    mkdir -p /tmp/owncert-turn
                    wget -O /tmp/owncert-turn/fullchain.pem ${TurnOwnPublicCertificate}
                    wget -O /tmp/owncert-turn/privkey.pem ${TurnOwnPrivateCertificate}

                    # Convert to base64
                    OWN_CERT_CRT_TURN=$(base64 -w 0 /tmp/owncert-turn/fullchain.pem)
                    OWN_CERT_KEY_TURN=$(base64 -w 0 /tmp/owncert-turn/privkey.pem)

                    CERT_ARGS+=(
                      "--turn-owncert-private-key=$OWN_CERT_KEY_TURN"
                      "--turn-owncert-public-key=$OWN_CERT_CRT_TURN"
                    )
                  fi
                fi

                # Construct the final command with all arguments
                FINAL_COMMAND="$INSTALL_COMMAND $(printf "%s " "${!COMMON_ARGS[@]}") $(printf "%s " "${!CERT_ARGS[@]}")"

                # Install OpenVidu
                exec bash -c "$FINAL_COMMAND"

              mode: "000755"
              owner: "root"
              group: "root"
'/usr/local/bin/config_s3.sh':
|
||||
content: !Sub
|
||||
- |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Install dir and config dir
|
||||
INSTALL_DIR="/opt/openvidu"
|
||||
CONFIG_DIR="${!INSTALL_DIR}/config"
|
||||
|
||||
# Config S3 bucket
|
||||
EXTERNAL_S3_ENDPOINT="https://s3.${AWS::Region}.amazonaws.com"
|
||||
EXTERNAL_S3_REGION="${AWS::Region}"
|
||||
EXTERNAL_S3_PATH_STYLE_ACCESS="false"
|
||||
EXTERNAL_S3_BUCKET_APP_DATA=${S3RecordingsBucketResourceName}
|
||||
sed -i "s|EXTERNAL_S3_ENDPOINT=.*|EXTERNAL_S3_ENDPOINT=$EXTERNAL_S3_ENDPOINT|" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s|EXTERNAL_S3_REGION=.*|EXTERNAL_S3_REGION=$EXTERNAL_S3_REGION|" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s|EXTERNAL_S3_PATH_STYLE_ACCESS=.*|EXTERNAL_S3_PATH_STYLE_ACCESS=$EXTERNAL_S3_PATH_STYLE_ACCESS|" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s|EXTERNAL_S3_BUCKET_APP_DATA=.*|EXTERNAL_S3_BUCKET_APP_DATA=$EXTERNAL_S3_BUCKET_APP_DATA|" "${!CONFIG_DIR}/openvidu.env"
|
||||
- S3RecordingsBucketResourceName: !If
|
||||
- CreateRecordingsBucket
|
||||
- !Ref S3AppDataBucketResource
|
||||
- !Ref S3AppDataBucketName
|
||||
mode: "000755"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
'/usr/local/bin/after_install.sh':
|
||||
content: !Sub |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
# Get current shared secret
|
||||
SHARED_SECRET=$(aws secretsmanager get-secret-value \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--query SecretString --output text)
|
||||
|
||||
# Token for IMDSv2
|
||||
TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||
|
||||
if [[ "${DomainName}" == '' ]]; then
|
||||
PublicHostname=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/public-hostname)
|
||||
DOMAIN=$PublicHostname
|
||||
else
|
||||
DOMAIN=${DomainName}
|
||||
fi
|
||||
|
||||
# Generate URLs
|
||||
DASHBOARD_URL="https://${!DOMAIN}/dashboard/"
|
||||
GRAFANA_URL="https://${!DOMAIN}/grafana/"
|
||||
MINIO_URL="https://${!DOMAIN}/minio-console/"
|
||||
|
||||
# Update shared secret
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DOMAIN_NAME": "'"$DOMAIN"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DASHBOARD_URL": "'"$DASHBOARD_URL"'" }')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"GRAFANA_URL": "'"$GRAFANA_URL"'" }')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MINIO_URL": "'"$MINIO_URL"'" }')"
|
||||
|
||||
# Update shared secret
|
||||
aws secretsmanager update-secret \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--secret-string "$SHARED_SECRET"
|
||||
mode: "000755"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
'/usr/local/bin/update_config_from_secret.sh':
|
||||
content: !Sub |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
# Token for IMDSv2
|
||||
TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
|
||||
# Get current shared secret
|
||||
SHARED_SECRET=$(aws secretsmanager get-secret-value \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--query SecretString --output text)
|
||||
|
||||
# Installation directory
|
||||
INSTALL_DIR="/opt/openvidu"
|
||||
CONFIG_DIR="${!INSTALL_DIR}/config"
|
||||
|
||||
# Replace DOMAIN_NAME
|
||||
export DOMAIN=$(echo $SHARED_SECRET | jq -r .DOMAIN_NAME)
|
||||
if [[ $DOMAIN == *"compute.amazonaws.com"* ]] || [[ -z $DOMAIN ]]; then
|
||||
PublicHostname=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/public-hostname)
|
||||
DOMAIN=$PublicHostname
|
||||
fi
|
||||
if [[ -n "$DOMAIN" ]]; then
|
||||
sed -i "s/DOMAIN_NAME=.*/DOMAIN_NAME=$DOMAIN/" "${!CONFIG_DIR}/openvidu.env"
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Replace LIVEKIT_TURN_DOMAIN_NAME
|
||||
export LIVEKIT_TURN_DOMAIN_NAME=$(echo $SHARED_SECRET | jq -r .LIVEKIT_TURN_DOMAIN_NAME)
|
||||
if [[ -n "$LIVEKIT_TURN_DOMAIN_NAME" ]]; then
|
||||
sed -i "s/LIVEKIT_TURN_DOMAIN_NAME=.*/LIVEKIT_TURN_DOMAIN_NAME=$LIVEKIT_TURN_DOMAIN_NAME/" "${!CONFIG_DIR}/openvidu.env"
|
||||
fi
|
||||
|
||||
if [[ ${CertificateType} == "letsencrypt" ]]; then
|
||||
export LETSENCRYPT_EMAIL=$(echo $SHARED_SECRET | jq -r .LETSENCRYPT_EMAIL)
|
||||
sed -i "s/LETSENCRYPT_EMAIL=.*/LETSENCRYPT_EMAIL=$LETSENCRYPT_EMAIL/" "${!CONFIG_DIR}/openvidu.env"
|
||||
fi
|
||||
|
||||
# Replace rest of the values
|
||||
sed -i "s/REDIS_PASSWORD=.*/REDIS_PASSWORD=$(echo $SHARED_SECRET | jq -r .REDIS_PASSWORD)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/MONGO_ADMIN_USERNAME=.*/MONGO_ADMIN_USERNAME=$(echo $SHARED_SECRET | jq -r .MONGO_ADMIN_USERNAME)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/MONGO_ADMIN_PASSWORD=.*/MONGO_ADMIN_PASSWORD=$(echo $SHARED_SECRET | jq -r .MONGO_ADMIN_PASSWORD)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/MONGO_REPLICA_SET_KEY=.*/MONGO_REPLICA_SET_KEY=$(echo $SHARED_SECRET | jq -r .MONGO_REPLICA_SET_KEY)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/DASHBOARD_ADMIN_USERNAME=.*/DASHBOARD_ADMIN_USERNAME=$(echo $SHARED_SECRET | jq -r .DASHBOARD_ADMIN_USERNAME)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/DASHBOARD_ADMIN_PASSWORD=.*/DASHBOARD_ADMIN_PASSWORD=$(echo $SHARED_SECRET | jq -r .DASHBOARD_ADMIN_PASSWORD)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/MINIO_ACCESS_KEY=.*/MINIO_ACCESS_KEY=$(echo $SHARED_SECRET | jq -r .MINIO_ACCESS_KEY)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/MINIO_SECRET_KEY=.*/MINIO_SECRET_KEY=$(echo $SHARED_SECRET | jq -r .MINIO_SECRET_KEY)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/GRAFANA_ADMIN_USERNAME=.*/GRAFANA_ADMIN_USERNAME=$(echo $SHARED_SECRET | jq -r .GRAFANA_ADMIN_USERNAME)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/GRAFANA_ADMIN_PASSWORD=.*/GRAFANA_ADMIN_PASSWORD=$(echo $SHARED_SECRET | jq -r .GRAFANA_ADMIN_PASSWORD)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/LIVEKIT_API_KEY=.*/LIVEKIT_API_KEY=$(echo $SHARED_SECRET | jq -r .LIVEKIT_API_KEY)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/LIVEKIT_API_SECRET=.*/LIVEKIT_API_SECRET=$(echo $SHARED_SECRET | jq -r .LIVEKIT_API_SECRET)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
sed -i "s/CALL_USER=.*/CALL_USER=$(echo $SHARED_SECRET | jq -r .DEFAULT_APP_USERNAME)/" "${!CONFIG_DIR}/app.env"
|
||||
sed -i "s/CALL_SECRET=.*/CALL_SECRET=$(echo $SHARED_SECRET | jq -r .DEFAULT_APP_PASSWORD)/" "${!CONFIG_DIR}/app.env"
|
||||
sed -i "s/CALL_ADMIN_USER=.*/CALL_ADMIN_USER=$(echo $SHARED_SECRET | jq -r .DEFAULT_APP_ADMIN_USERNAME)/" "${!CONFIG_DIR}/app.env"
|
||||
sed -i "s/CALL_ADMIN_SECRET=.*/CALL_ADMIN_SECRET=$(echo $SHARED_SECRET | jq -r .DEFAULT_APP_ADMIN_PASSWORD)/" "${!CONFIG_DIR}/app.env"
|
||||
sed -i "s/ENABLED_MODULES=.*/ENABLED_MODULES=$(echo $SHARED_SECRET | jq -r .ENABLED_MODULES)/" "${!CONFIG_DIR}/openvidu.env"
|
||||
|
||||
# Update URLs in secret
|
||||
DASHBOARD_URL="https://${!DOMAIN}/dashboard/"
|
||||
GRAFANA_URL="https://${!DOMAIN}/grafana/"
|
||||
MINIO_URL="https://${!DOMAIN}/minio-console/"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DOMAIN_NAME": "'"$DOMAIN"'" }')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DASHBOARD_URL": "'"$DASHBOARD_URL"'" }')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"GRAFANA_URL": "'"$GRAFANA_URL"'" }')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MINIO_URL": "'"$MINIO_URL"'" }')"
|
||||
aws secretsmanager update-secret \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--secret-string "$SHARED_SECRET"
|
||||
mode: "000755"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
'/usr/local/bin/update_secret_from_config.sh':
|
||||
content: !Sub |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
# Get current shared secret
|
||||
SHARED_SECRET=$(aws secretsmanager get-secret-value \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--query SecretString --output text)
|
||||
|
||||
# Installation directory
|
||||
INSTALL_DIR="/opt/openvidu"
|
||||
CONFIG_DIR="${!INSTALL_DIR}/config"
|
||||
|
||||
if [[ ${CertificateType} == "letsencrypt" ]]; then
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"LETSENCRYPT_EMAIL": "'"$(/usr/local/bin/get_value_from_config.sh LETSENCRYPT_EMAIL "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
fi
|
||||
|
||||
# Update shared secret
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"REDIS_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh REDIS_PASSWORD "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DOMAIN_NAME": "'"$(/usr/local/bin/get_value_from_config.sh DOMAIN_NAME "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"LIVEKIT_TURN_DOMAIN_NAME": "'"$(/usr/local/bin/get_value_from_config.sh LIVEKIT_TURN_DOMAIN_NAME "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MONGO_ADMIN_USERNAME": "'"$(/usr/local/bin/get_value_from_config.sh MONGO_ADMIN_USERNAME "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MONGO_ADMIN_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh MONGO_ADMIN_PASSWORD "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MONGO_REPLICA_SET_KEY": "'"$(/usr/local/bin/get_value_from_config.sh MONGO_REPLICA_SET_KEY "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MINIO_ACCESS_KEY": "'"$(/usr/local/bin/get_value_from_config.sh MINIO_ACCESS_KEY "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"MINIO_SECRET_KEY": "'"$(/usr/local/bin/get_value_from_config.sh MINIO_SECRET_KEY "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DASHBOARD_ADMIN_USERNAME": "'"$(/usr/local/bin/get_value_from_config.sh DASHBOARD_ADMIN_USERNAME "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DASHBOARD_ADMIN_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh DASHBOARD_ADMIN_PASSWORD "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"GRAFANA_ADMIN_USERNAME": "'"$(/usr/local/bin/get_value_from_config.sh GRAFANA_ADMIN_USERNAME "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"GRAFANA_ADMIN_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh GRAFANA_ADMIN_PASSWORD "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"LIVEKIT_API_KEY": "'"$(/usr/local/bin/get_value_from_config.sh LIVEKIT_API_KEY "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"LIVEKIT_API_SECRET": "'"$(/usr/local/bin/get_value_from_config.sh LIVEKIT_API_SECRET "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DEFAULT_APP_USERNAME": "'"$(/usr/local/bin/get_value_from_config.sh CALL_USER "${!CONFIG_DIR}/app.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DEFAULT_APP_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh CALL_SECRET "${!CONFIG_DIR}/app.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DEFAULT_APP_ADMIN_USERNAME": "'"$(/usr/local/bin/get_value_from_config.sh CALL_ADMIN_USER "${!CONFIG_DIR}/app.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"DEFAULT_APP_ADMIN_PASSWORD": "'"$(/usr/local/bin/get_value_from_config.sh CALL_ADMIN_SECRET "${!CONFIG_DIR}/app.env")"'"}')"
|
||||
SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"ENABLED_MODULES": "'"$(/usr/local/bin/get_value_from_config.sh ENABLED_MODULES "${!CONFIG_DIR}/openvidu.env")"'"}')"
|
||||
|
||||
# Update shared secret
|
||||
aws secretsmanager update-secret \
|
||||
--region ${AWS::Region} \
|
||||
--secret-id openvidu-${AWS::Region}-${AWS::StackName} \
|
||||
--secret-string "$SHARED_SECRET"
|
||||
|
||||
mode: "000755"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
'/usr/local/bin/get_value_from_config.sh':
|
||||
content: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Function to get the value of a given key from the environment file
|
||||
get_value() {
|
||||
local key="$1"
|
||||
local file_path="$2"
|
||||
|
||||
# Use grep to find the line with the key, ignoring lines starting with #
|
||||
# Use awk to split on '=' and print the second field, which is the value
|
||||
local value=$(grep -E "^\s*$key\s*=" "$file_path" | awk -F= '{print $2}' | sed 's/#.*//; s/^\s*//; s/\s*$//')
|
||||
|
||||
# If the value is empty, return "none"
|
||||
if [ -z "$value" ]; then
|
||||
echo "none"
|
||||
else
|
||||
echo "$value"
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if the correct number of arguments are supplied
|
||||
if [ "$#" -ne 2 ]; then
|
||||
echo "Usage: $0 <key> <file_path>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get the key and file path from the arguments
|
||||
key="$1"
|
||||
file_path="$2"
|
||||
|
||||
# Get and print the value
|
||||
get_value "$key" "$file_path"
|
||||
mode: "000755"
|
||||
owner: "root"
|
||||
group: "root"
|
||||
            '/usr/local/bin/store_secret.sh':
              content: !Sub |
                #!/bin/bash
                set -e
                # Modes: save, generate
                # save mode: save the secret in the secret manager
                # generate mode: generate a random password and save it in the secret manager
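                # Usage (added for illustration):
                #   store_secret.sh save <KEY> <value>            -> stores <value> under <KEY> and echoes it
                #   store_secret.sh generate <KEY> [prefix] [len] -> stores and echoes a random value (default length 44)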
                MODE="$1"
                SHARED_SECRET="$(aws secretsmanager get-secret-value \
                  --region ${AWS::Region} \
                  --secret-id ${OpenViduSharedInfo} \
                  --query SecretString --output text)"
                if [[ "$MODE" == "generate" ]]; then
                  SECRET_KEY_NAME="$2"
                  PREFIX="${!3:-}"
                  LENGTH="${!4:-44}"
                  RANDOM_PASSWORD="$(openssl rand -base64 64 | tr -d '+/=\n' | cut -c -${!LENGTH})"
                  RANDOM_PASSWORD="${!PREFIX}${!RANDOM_PASSWORD}"
                  SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"'"$SECRET_KEY_NAME"'": "'"$RANDOM_PASSWORD"'"}')"
                  aws secretsmanager update-secret \
                    --region ${AWS::Region} \
                    --secret-id ${OpenViduSharedInfo} \
                    --secret-string "$SHARED_SECRET" > /dev/null 2>&1
                  echo "$RANDOM_PASSWORD"
                elif [[ "$MODE" == "save" ]]; then
                  SECRET_KEY_NAME="$2"
                  SECRET_VALUE="$3"
                  SHARED_SECRET="$(echo "$SHARED_SECRET" | jq '. + {"'"$SECRET_KEY_NAME"'": "'"$SECRET_VALUE"'"}')"
                  aws secretsmanager update-secret \
                    --region ${AWS::Region} \
                    --secret-id ${OpenViduSharedInfo} \
                    --secret-string "$SHARED_SECRET" > /dev/null 2>&1
                  echo "$SECRET_VALUE"
                else
                  exit 1
                fi
              mode: "000755"
              owner: "root"
              group: "root"
            '/usr/local/bin/check_app_ready.sh':
              content: |
                #!/bin/bash
                while true; do
                  HTTP_STATUS=$(curl -Ik http://localhost:7880 | head -n1 | awk '{print $2}')
                  if [ $HTTP_STATUS == 200 ]; then
                    break
                  fi
                  sleep 5
                done
              mode: "000755"
              owner: "root"
              group: "root"
            '/usr/local/bin/restart.sh':
              content: |
                #!/bin/bash
                set -e
                # Stop all services
                systemctl stop openvidu

                # Update config from secret
                /usr/local/bin/update_config_from_secret.sh

                # Start all services
                systemctl start openvidu
              mode: "000755"
              owner: "root"
              group: "root"
    Properties:
      ImageId: !Ref AmiId
      LaunchTemplate:
        # Enable IMDSv2 by default
        LaunchTemplateName: IMDSV2
        Version: !GetAtt IMDSv2LaunchTemplate.DefaultVersionNumber
      InstanceType: !Ref InstanceType
      IamInstanceProfile: !Ref OpenViduServerInstanceProfile
      SecurityGroups:
        - !Ref WebServerSecurityGroup
      KeyName: !Ref KeyName
      Tags:
        - Key: Name
          Value: !Ref 'AWS::StackName'
      UserData:
        Fn::Base64: !Sub |
          #!/bin/bash -x
          set -eu -o pipefail

          apt-get update && apt-get install -y \
              python3-pip \
              ec2-instance-connect
          pip3 install https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-py3-latest.tar.gz

          cfn-init --region ${AWS::Region} --stack ${AWS::StackId} --resource OpenviduServer

          export HOME="/root"

          # Install OpenVidu
          /usr/local/bin/install.sh || { echo "[OpenVidu] error installing OpenVidu"; exit 1; }

          # Config S3 bucket
          /usr/local/bin/config_s3.sh || { echo "[OpenVidu] error configuring S3 bucket"; exit 1; }

          # Start OpenVidu
          systemctl start openvidu || { echo "[OpenVidu] error starting OpenVidu"; exit 1; }

          # Update shared secret
          /usr/local/bin/after_install.sh || { echo "[OpenVidu] error updating shared secret"; exit 1; }

          # Launch on reboot
          echo "@reboot /usr/local/bin/restart.sh" | crontab

          # Wait for the app
          /usr/local/bin/check_app_ready.sh

          # sending the finish call
          /usr/local/bin/cfn-signal -e $? --stack ${AWS::StackId} --resource WaitCondition --region ${AWS::Region}
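          # Note (added for clarity): the cfn-signal call above reports the exit status of
          # check_app_ready.sh to the WaitCondition resource declared below; if no success signal
          # arrives within its PT10M timeout, CloudFormation rolls the stack back.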

      BlockDeviceMappings:
        - DeviceName: /dev/sda1
          Ebs:
            VolumeType: gp2
            DeleteOnTermination: true
            VolumeSize: 200

  MyEIP:
    Type: 'AWS::EC2::EIPAssociation'
    Condition: PublicElasticIPPresent
    Properties:
      InstanceId: !Ref OpenviduServer
      EIP: !Ref PublicElasticIP

  IMDSv2LaunchTemplate:
    Type: AWS::EC2::LaunchTemplate
    Properties:
      LaunchTemplateName: IMDSV2
      LaunchTemplateData:
        MetadataOptions:
          HttpEndpoint: enabled
          HttpPutResponseHopLimit: 1
          HttpTokens: required

  WaitCondition:
    Type: 'AWS::CloudFormation::WaitCondition'
    CreationPolicy:
      ResourceSignal:
        Timeout: PT10M
        Count: '1'

  WebServerSecurityGroup:
    Type: 'AWS::EC2::SecurityGroup'
    Properties:
      GroupDescription: SSH, Proxy and OpenVidu WebRTC Ports
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: 22
          ToPort: 22
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 22
          ToPort: 22
          CidrIpv6: ::/0
        - IpProtocol: tcp
          FromPort: 80
          ToPort: 80
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 80
          ToPort: 80
          CidrIpv6: ::/0
        - IpProtocol: tcp
          FromPort: 443
          ToPort: 443
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 443
          ToPort: 443
          CidrIpv6: ::/0
        - IpProtocol: udp
          FromPort: 443
          ToPort: 443
          CidrIp: 0.0.0.0/0
        - IpProtocol: udp
          FromPort: 443
          ToPort: 443
          CidrIpv6: ::/0
        - IpProtocol: tcp
          FromPort: 1935
          ToPort: 1935
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 1935
          ToPort: 1935
          CidrIpv6: ::/0
        - IpProtocol: udp
          FromPort: 7885
          ToPort: 7885
          CidrIp: 0.0.0.0/0
        - IpProtocol: udp
          FromPort: 7885
          ToPort: 7885
          CidrIpv6: ::/0
        - IpProtocol: udp
          FromPort: 50000
          ToPort: 60000
          CidrIp: 0.0.0.0/0
        - IpProtocol: udp
          FromPort: 50000
          ToPort: 60000
          CidrIpv6: ::/0
        - IpProtocol: tcp
          FromPort: 50000
          ToPort: 60000
          CidrIp: 0.0.0.0/0
        - IpProtocol: tcp
          FromPort: 50000
          ToPort: 60000
          CidrIpv6: ::/0

Outputs:
  ServicesAndCredentials:
    Description: Services and credentials
    Value: !Sub https://${AWS::Region}.console.aws.amazon.com/secretsmanager/home?region=${AWS::Region}#!/secret?name=openvidu-${AWS::Region}-${AWS::StackName}
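
For reference, a minimal sketch of launching this template with the AWS CLI (the template file name, stack name and parameter values below are illustrative, not part of the commit):

    aws cloudformation create-stack \
      --region eu-west-1 \
      --stack-name openvidu-singlenode \
      --template-body file://cf-openvidu-singlenode.yaml \
      --capabilities CAPABILITY_NAMED_IAM \
      --parameters ParameterKey=CertificateType,ParameterValue=letsencrypt \
                   ParameterKey=DomainName,ParameterValue=openvidu.example.com \
                   ParameterKey=PublicElasticIP,ParameterValue=203.0.113.10 \
                   ParameterKey=LetsEncryptEmail,ParameterValue=admin@example.com \
                   ParameterKey=KeyName,ParameterValue=my-keypair

CAPABILITY_NAMED_IAM is required because the template creates an IAM role and instance profile with explicit names. Once the stack reaches CREATE_COMPLETE, the ServicesAndCredentials output links to the Secrets Manager entry holding every URL and credential.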
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long

@@ -0,0 +1,165 @@
#!/bin/sh
|
||||
# Docker & Docker Compose will need to be installed on the machine
|
||||
set -eu
|
||||
export DOCKER_VERSION="${DOCKER_VERSION:-27.5.1}"
|
||||
export DOCKER_COMPOSE_VERSION="${DOCKER_COMPOSE_VERSION:-v2.32.4}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export INSTALLER_IMAGE="${INSTALLER_IMAGE:-docker.io/openvidu/openvidu-installer:${OPENVIDU_VERSION}}"
|
||||
export MINIO_SERVER_IMAGE="${MINIO_SERVER_IMAGE:-docker.io/bitnami/minio:2025.2.7-debian-12-r0}"
|
||||
export MINIO_CLIENT_IMAGE="${MINIO_CLIENT_IMAGE:-docker.io/minio/mc:RELEASE.2025-02-08T19-14-21Z}"
|
||||
export MONGO_SERVER_IMAGE="${MONGO_SERVER_IMAGE:-docker.io/mongo:8.0.4}"
|
||||
export REDIS_SERVER_IMAGE="${REDIS_SERVER_IMAGE:-docker.io/redis:7.4.2-alpine}"
|
||||
export BUSYBOX_IMAGE="${BUSYBOX_IMAGE:-docker.io/busybox:1.37.0}"
|
||||
export CADDY_SERVER_IMAGE="${CADDY_SERVER_IMAGE:-docker.io/openvidu/openvidu-caddy:${OPENVIDU_VERSION}}"
|
||||
export CADDY_SERVER_PRO_IMAGE="${CADDY_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-caddy:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_OPERATOR_IMAGE="${OPENVIDU_OPERATOR_IMAGE:-docker.io/openvidu/openvidu-operator:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_PRO_IMAGE="${OPENVIDU_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-server-pro:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_IMAGE="${OPENVIDU_SERVER_IMAGE:-docker.io/openvidu/openvidu-server:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_CALL_SERVER_IMAGE="${OPENVIDU_CALL_SERVER_IMAGE:-docker.io/openvidu/openvidu-call:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_PRO_IMAGE="${OPENVIDU_DASHBOARD_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_IMAGE="${OPENVIDU_DASHBOARD_IMAGE:-docker.io/openvidu/openvidu-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_V2COMPATIBILITY_IMAGE="${OPENVIDU_V2COMPATIBILITY_IMAGE:-docker.io/openvidu/openvidu-v2compatibility:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_INGRESS_SERVER_IMAGE="${LIVEKIT_INGRESS_SERVER_IMAGE:-docker.io/openvidu/ingress:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_EGRESS_SERVER_IMAGE="${LIVEKIT_EGRESS_SERVER_IMAGE:-docker.io/livekit/egress:v1.9.0}"
|
||||
export PROMETHEUS_IMAGE="${PROMETHEUS_IMAGE:-docker.io/prom/prometheus:v3.1.0}"
|
||||
export PROMTAIL_IMAGE="${PROMTAIL_IMAGE:-docker.io/grafana/promtail:3.3.2}"
|
||||
export LOKI_IMAGE="${LOKI_IMAGE:-docker.io/grafana/loki:3.3.2}"
|
||||
export MIMIR_IMAGE="${MIMIR_IMAGE:-docker.io/bitnami/grafana-mimir:2.15.0}"
|
||||
export GRAFANA_IMAGE="${GRAFANA_IMAGE:-docker.io/grafana/grafana:11.5.1}"
|
||||
|
||||
wait_for_docker() {
|
||||
echo "Waiting for Docker to start..."
|
||||
|
||||
# Set a countdown (in seconds)
|
||||
COUNTDOWN=60
|
||||
|
||||
while [ "$COUNTDOWN" -gt 0 ]; do
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "Docker started successfully."
|
||||
break
|
||||
else
|
||||
# Reduce the countdown by 1 each iteration.
|
||||
COUNTDOWN=$(( COUNTDOWN - 1 ))
|
||||
|
||||
if [ "$COUNTDOWN" -eq 0 ]; then
|
||||
echo "ERROR: Docker did not start within the allocated time."
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker > /dev/null 2>&1
|
||||
then
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh --version "${DOCKER_VERSION}" || { echo "Can't install Docker automatically. Install it manually and run this script again"; exit 1; }
|
||||
else
|
||||
echo "Docker already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose > /dev/null 2>&1
|
||||
then
|
||||
TIME_LIMIT_SECONDS=20
|
||||
START_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
while true
|
||||
do
|
||||
CURRENT_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
if [ $((CURRENT_TIME-START_TIME)) -gt $TIME_LIMIT_SECONDS ]; then
|
||||
echo "Error downloading docker-compose. Could not download it in $TIME_LIMIT_SECONDS seconds"
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
exit 1
|
||||
fi
|
||||
STATUS_RECEIVED=$(curl --retry 5 --retry-max-time 40 --write-out "%{http_code}\n" -L "https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose)
|
||||
CURL_EXIT_CODE=$?
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
echo "Error downloading docker-compose. curl failed with exit code $CURL_EXIT_CODE. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
if [ "${STATUS_RECEIVED}" -ne "200" ]; then
|
||||
echo "Error downloading docker-compose. Received HTTP status code $STATUS_RECEIVED. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
echo "Success downloading docker-compose"
|
||||
chmod 755 /usr/local/bin/docker-compose
|
||||
break
|
||||
done
|
||||
|
||||
# Create a symbolic link to docker-compose in the Docker CLI plugins directory
|
||||
# so docker compose can be used also
|
||||
mkdir -p /usr/local/lib/docker/cli-plugins
|
||||
ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
echo "Docker Compose already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
# Restart Docker and wait for it to start
|
||||
systemctl enable docker
|
||||
systemctl stop docker
|
||||
systemctl start docker
|
||||
wait_for_docker
|
||||
|
||||
# Create random temp directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
docker pull "${INSTALLER_IMAGE}"
|
||||
|
||||
# Generate installation scripts
|
||||
COMMON_DOCKER_OPTIONS="--network=host -v ${TMP_DIR}:/output \
|
||||
-e OPENVIDU_VERSION=$OPENVIDU_VERSION \
|
||||
-e CADDY_SERVER_IMAGE=$CADDY_SERVER_IMAGE \
|
||||
-e CADDY_SERVER_PRO_IMAGE=$CADDY_SERVER_PRO_IMAGE \
|
||||
-e MINIO_SERVER_IMAGE=$MINIO_SERVER_IMAGE \
|
||||
-e MINIO_CLIENT_IMAGE=$MINIO_CLIENT_IMAGE \
|
||||
-e MONGO_SERVER_IMAGE=$MONGO_SERVER_IMAGE \
|
||||
-e REDIS_SERVER_IMAGE=$REDIS_SERVER_IMAGE \
|
||||
-e BUSYBOX_IMAGE=$BUSYBOX_IMAGE \
|
||||
-e OPENVIDU_OPERATOR_IMAGE=$OPENVIDU_OPERATOR_IMAGE \
|
||||
-e OPENVIDU_SERVER_PRO_IMAGE=$OPENVIDU_SERVER_PRO_IMAGE \
|
||||
-e OPENVIDU_SERVER_IMAGE=$OPENVIDU_SERVER_IMAGE \
|
||||
-e OPENVIDU_CALL_SERVER_IMAGE=$OPENVIDU_CALL_SERVER_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_PRO_IMAGE=$OPENVIDU_DASHBOARD_PRO_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_IMAGE=$OPENVIDU_DASHBOARD_IMAGE \
|
||||
-e OPENVIDU_V2COMPATIBILITY_IMAGE=$OPENVIDU_V2COMPATIBILITY_IMAGE \
|
||||
-e LIVEKIT_INGRESS_SERVER_IMAGE=$LIVEKIT_INGRESS_SERVER_IMAGE \
|
||||
-e LIVEKIT_EGRESS_SERVER_IMAGE=$LIVEKIT_EGRESS_SERVER_IMAGE \
|
||||
-e PROMETHEUS_IMAGE=$PROMETHEUS_IMAGE \
|
||||
-e PROMTAIL_IMAGE=$PROMTAIL_IMAGE \
|
||||
-e LOKI_IMAGE=$LOKI_IMAGE \
|
||||
-e MIMIR_IMAGE=$MIMIR_IMAGE \
|
||||
-e GRAFANA_IMAGE=$GRAFANA_IMAGE \
|
||||
${INSTALLER_IMAGE} \
|
||||
--deployment-type=single_node \
|
||||
--install \
|
||||
$*"
|
||||
|
||||
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
cd "$TMP_DIR/installation-scripts/openvidu/"
|
||||
chmod +x install.sh
|
||||
./install.sh
|
||||
|
||||
cat finish-message.txt
|
||||
echo
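
For context, the script above is the single-node installer wrapper that the CloudFormation template fetches from get.openvidu.io and pipes into sh. A hedged usage sketch, mirroring the arguments the template builds (all values are placeholders):

    sh <(curl -fsSL http://get.openvidu.io/community/singlenode/main/install.sh) \
      --no-tty --install \
      --environment=aws \
      --deployment-type=single_node \
      --domain-name=openvidu.example.com \
      --certificate-type=letsencrypt \
      --letsencrypt-email=admin@example.com

Options not passed on the command line appear to be collected interactively: the wrapper only switches to a non-interactive docker run when --no-tty is present.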

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long

@@ -0,0 +1,173 @@
#!/bin/sh
|
||||
# Docker & Docker Compose will need to be installed on the machine
|
||||
set -eu
|
||||
export DOCKER_VERSION="${DOCKER_VERSION:-27.5.1}"
|
||||
export DOCKER_COMPOSE_VERSION="${DOCKER_COMPOSE_VERSION:-v2.32.4}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export INSTALLER_IMAGE="${INSTALLER_IMAGE:-docker.io/openvidu/openvidu-installer:${OPENVIDU_VERSION}}"
|
||||
export MINIO_SERVER_IMAGE="${MINIO_SERVER_IMAGE:-docker.io/bitnami/minio:2025.2.7-debian-12-r0}"
|
||||
export MINIO_CLIENT_IMAGE="${MINIO_CLIENT_IMAGE:-docker.io/minio/mc:RELEASE.2025-02-08T19-14-21Z}"
|
||||
export MONGO_SERVER_IMAGE="${MONGO_SERVER_IMAGE:-docker.io/mongo:8.0.4}"
|
||||
export REDIS_SERVER_IMAGE="${REDIS_SERVER_IMAGE:-docker.io/redis:7.4.2-alpine}"
|
||||
export BUSYBOX_IMAGE="${BUSYBOX_IMAGE:-docker.io/busybox:1.37.0}"
|
||||
export CADDY_SERVER_IMAGE="${CADDY_SERVER_IMAGE:-docker.io/openvidu/openvidu-caddy:${OPENVIDU_VERSION}}"
|
||||
export CADDY_SERVER_PRO_IMAGE="${CADDY_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-caddy:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_OPERATOR_IMAGE="${OPENVIDU_OPERATOR_IMAGE:-docker.io/openvidu/openvidu-operator:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_PRO_IMAGE="${OPENVIDU_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-server-pro:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_IMAGE="${OPENVIDU_SERVER_IMAGE:-docker.io/openvidu/openvidu-server:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_CALL_SERVER_IMAGE="${OPENVIDU_CALL_SERVER_IMAGE:-docker.io/openvidu/openvidu-call:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_PRO_IMAGE="${OPENVIDU_DASHBOARD_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_IMAGE="${OPENVIDU_DASHBOARD_IMAGE:-docker.io/openvidu/openvidu-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_V2COMPATIBILITY_IMAGE="${OPENVIDU_V2COMPATIBILITY_IMAGE:-docker.io/openvidu/openvidu-v2compatibility:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_INGRESS_SERVER_IMAGE="${LIVEKIT_INGRESS_SERVER_IMAGE:-docker.io/openvidu/ingress:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_EGRESS_SERVER_IMAGE="${LIVEKIT_EGRESS_SERVER_IMAGE:-docker.io/livekit/egress:v1.9.0}"
|
||||
export PROMETHEUS_IMAGE="${PROMETHEUS_IMAGE:-docker.io/prom/prometheus:v3.1.0}"
|
||||
export PROMTAIL_IMAGE="${PROMTAIL_IMAGE:-docker.io/grafana/promtail:3.3.2}"
|
||||
export LOKI_IMAGE="${LOKI_IMAGE:-docker.io/grafana/loki:3.3.2}"
|
||||
export MIMIR_IMAGE="${MIMIR_IMAGE:-docker.io/bitnami/grafana-mimir:2.15.0}"
|
||||
export GRAFANA_IMAGE="${GRAFANA_IMAGE:-docker.io/grafana/grafana:11.5.1}"
|
||||
|
||||
wait_for_docker() {
|
||||
echo "Waiting for Docker to start..."
|
||||
|
||||
# Set a countdown (in seconds)
|
||||
COUNTDOWN=60
|
||||
|
||||
while [ "$COUNTDOWN" -gt 0 ]; do
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "Docker started successfully."
|
||||
break
|
||||
else
|
||||
# Reduce the countdown by 1 each iteration.
|
||||
COUNTDOWN=$(( COUNTDOWN - 1 ))
|
||||
|
||||
if [ "$COUNTDOWN" -eq 0 ]; then
|
||||
echo "ERROR: Docker did not start within the allocated time."
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker > /dev/null 2>&1
|
||||
then
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh --version "${DOCKER_VERSION}" || { echo "Can't install Docker automatically. Install it manually and run this script again"; exit 1; }
|
||||
else
|
||||
echo "Docker already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose > /dev/null 2>&1
|
||||
then
|
||||
TIME_LIMIT_SECONDS=20
|
||||
START_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
while true
|
||||
do
|
||||
CURRENT_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
if [ $((CURRENT_TIME-START_TIME)) -gt $TIME_LIMIT_SECONDS ]; then
|
||||
echo "Error downloading docker-compose. Could not download it in $TIME_LIMIT_SECONDS seconds"
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
exit 1
|
||||
fi
|
||||
STATUS_RECEIVED=$(curl --retry 5 --retry-max-time 40 --write-out "%{http_code}\n" -L "https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose)
|
||||
CURL_EXIT_CODE=$?
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
echo "Error downloading docker-compose. curl failed with exit code $CURL_EXIT_CODE. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
if [ "${STATUS_RECEIVED}" -ne "200" ]; then
|
||||
echo "Error downloading docker-compose. Received HTTP status code $STATUS_RECEIVED. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
echo "Success downloading docker-compose"
|
||||
chmod 755 /usr/local/bin/docker-compose
|
||||
break
|
||||
done
|
||||
|
||||
# Create a symbolic link to docker-compose in the Docker CLI plugins directory
|
||||
# so docker compose can be used also
|
||||
mkdir -p /usr/local/lib/docker/cli-plugins
|
||||
ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
echo "Docker Compose already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
# Restart Docker and wait for it to start
|
||||
systemctl enable docker
|
||||
systemctl stop docker
|
||||
systemctl start docker
|
||||
wait_for_docker
|
||||
|
||||
# Create random temp directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
docker pull "${INSTALLER_IMAGE}"
|
||||
|
||||
# Generate installation scripts
|
||||
COMMON_DOCKER_OPTIONS="--network=host -v ${TMP_DIR}:/output \
|
||||
-e OPENVIDU_VERSION=$OPENVIDU_VERSION \
|
||||
-e CADDY_SERVER_IMAGE=$CADDY_SERVER_IMAGE \
|
||||
-e CADDY_SERVER_PRO_IMAGE=$CADDY_SERVER_PRO_IMAGE \
|
||||
-e MINIO_SERVER_IMAGE=$MINIO_SERVER_IMAGE \
|
||||
-e MINIO_CLIENT_IMAGE=$MINIO_CLIENT_IMAGE \
|
||||
-e MONGO_SERVER_IMAGE=$MONGO_SERVER_IMAGE \
|
||||
-e REDIS_SERVER_IMAGE=$REDIS_SERVER_IMAGE \
|
||||
-e BUSYBOX_IMAGE=$BUSYBOX_IMAGE \
|
||||
-e OPENVIDU_OPERATOR_IMAGE=$OPENVIDU_OPERATOR_IMAGE \
|
||||
-e OPENVIDU_SERVER_PRO_IMAGE=$OPENVIDU_SERVER_PRO_IMAGE \
|
||||
-e OPENVIDU_SERVER_IMAGE=$OPENVIDU_SERVER_IMAGE \
|
||||
-e OPENVIDU_CALL_SERVER_IMAGE=$OPENVIDU_CALL_SERVER_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_PRO_IMAGE=$OPENVIDU_DASHBOARD_PRO_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_IMAGE=$OPENVIDU_DASHBOARD_IMAGE \
|
||||
-e OPENVIDU_V2COMPATIBILITY_IMAGE=$OPENVIDU_V2COMPATIBILITY_IMAGE \
|
||||
-e LIVEKIT_INGRESS_SERVER_IMAGE=$LIVEKIT_INGRESS_SERVER_IMAGE \
|
||||
-e LIVEKIT_EGRESS_SERVER_IMAGE=$LIVEKIT_EGRESS_SERVER_IMAGE \
|
||||
-e PROMETHEUS_IMAGE=$PROMETHEUS_IMAGE \
|
||||
-e PROMTAIL_IMAGE=$PROMTAIL_IMAGE \
|
||||
-e LOKI_IMAGE=$LOKI_IMAGE \
|
||||
-e MIMIR_IMAGE=$MIMIR_IMAGE \
|
||||
-e GRAFANA_IMAGE=$GRAFANA_IMAGE \
|
||||
${INSTALLER_IMAGE} \
|
||||
--deployment-type=elastic \
|
||||
--node-role=master-node \
|
||||
--install \
|
||||
$*"
|
||||
|
||||
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
cd "$TMP_DIR/installation-scripts/openvidu/"
|
||||
chmod +x install_ov_master_node.sh
|
||||
./install_ov_master_node.sh
|
||||
|
||||
cat finish-message.txt
|
||||
|
||||
# Warn about private IP being setup correctly
|
||||
echo
|
||||
echo "ATTENTION!!! This is the private IP of the 'Master Node'. Make sure this IP is reachable from all the 'Media Nodes'"
|
||||
cat private-ip.txt
|
||||
echo "If this is not your private IP, reinstall the 'Master Node' with the correct '--private-ip' parameter"
|
||||
|
||||
echo
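
The script above prepares the 'Master Node' of an elastic deployment (it passes --deployment-type=elastic --node-role=master-node to the installer and forwards any extra flags via $*). If the private IP it prints is not the one reachable from the Media Nodes, the closing warning suggests re-running it with an explicit address, roughly as follows (the script name and address are placeholders):

    sh ./install_master_node.sh --private-ip=10.0.1.10

Media Nodes must be able to reach the Master Node on that private IP for the deployment to form correctly.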

@@ -0,0 +1,172 @@
#!/bin/sh
|
||||
# Docker & Docker Compose will need to be installed on the machine
|
||||
set -eu
|
||||
export DOCKER_VERSION="${DOCKER_VERSION:-27.5.1}"
|
||||
export DOCKER_COMPOSE_VERSION="${DOCKER_COMPOSE_VERSION:-v2.32.4}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export INSTALLER_IMAGE="${INSTALLER_IMAGE:-docker.io/openvidu/openvidu-installer:${OPENVIDU_VERSION}}"
|
||||
export MINIO_SERVER_IMAGE="${MINIO_SERVER_IMAGE:-docker.io/bitnami/minio:2025.2.7-debian-12-r0}"
|
||||
export MINIO_CLIENT_IMAGE="${MINIO_CLIENT_IMAGE:-docker.io/minio/mc:RELEASE.2025-02-08T19-14-21Z}"
|
||||
export MONGO_SERVER_IMAGE="${MONGO_SERVER_IMAGE:-docker.io/mongo:8.0.4}"
|
||||
export REDIS_SERVER_IMAGE="${REDIS_SERVER_IMAGE:-docker.io/redis:7.4.2-alpine}"
|
||||
export BUSYBOX_IMAGE="${BUSYBOX_IMAGE:-docker.io/busybox:1.37.0}"
|
||||
export CADDY_SERVER_IMAGE="${CADDY_SERVER_IMAGE:-docker.io/openvidu/openvidu-caddy:${OPENVIDU_VERSION}}"
|
||||
export CADDY_SERVER_PRO_IMAGE="${CADDY_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-caddy:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_OPERATOR_IMAGE="${OPENVIDU_OPERATOR_IMAGE:-docker.io/openvidu/openvidu-operator:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_PRO_IMAGE="${OPENVIDU_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-server-pro:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_IMAGE="${OPENVIDU_SERVER_IMAGE:-docker.io/openvidu/openvidu-server:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_CALL_SERVER_IMAGE="${OPENVIDU_CALL_SERVER_IMAGE:-docker.io/openvidu/openvidu-call:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_PRO_IMAGE="${OPENVIDU_DASHBOARD_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_IMAGE="${OPENVIDU_DASHBOARD_IMAGE:-docker.io/openvidu/openvidu-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_V2COMPATIBILITY_IMAGE="${OPENVIDU_V2COMPATIBILITY_IMAGE:-docker.io/openvidu/openvidu-v2compatibility:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_INGRESS_SERVER_IMAGE="${LIVEKIT_INGRESS_SERVER_IMAGE:-docker.io/openvidu/ingress:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_EGRESS_SERVER_IMAGE="${LIVEKIT_EGRESS_SERVER_IMAGE:-docker.io/livekit/egress:v1.9.0}"
|
||||
export PROMETHEUS_IMAGE="${PROMETHEUS_IMAGE:-docker.io/prom/prometheus:v3.1.0}"
|
||||
export PROMTAIL_IMAGE="${PROMTAIL_IMAGE:-docker.io/grafana/promtail:3.3.2}"
|
||||
export LOKI_IMAGE="${LOKI_IMAGE:-docker.io/grafana/loki:3.3.2}"
|
||||
export MIMIR_IMAGE="${MIMIR_IMAGE:-docker.io/bitnami/grafana-mimir:2.15.0}"
|
||||
export GRAFANA_IMAGE="${GRAFANA_IMAGE:-docker.io/grafana/grafana:11.5.1}"
|
||||
|
||||
wait_for_docker() {
|
||||
echo "Waiting for Docker to start..."
|
||||
|
||||
# Set a countdown (in seconds)
|
||||
COUNTDOWN=60
|
||||
|
||||
while [ "$COUNTDOWN" -gt 0 ]; do
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "Docker started successfully."
|
||||
break
|
||||
else
|
||||
# Reduce the countdown by 1 each iteration.
|
||||
COUNTDOWN=$(( COUNTDOWN - 1 ))
|
||||
|
||||
if [ "$COUNTDOWN" -eq 0 ]; then
|
||||
echo "ERROR: Docker did not start within the allocated time."
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker > /dev/null 2>&1
|
||||
then
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh --version "${DOCKER_VERSION}" || { echo "Can't install Docker automatically. Install it manually and run this script again"; exit 1; }
|
||||
else
|
||||
echo "Docker already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose > /dev/null 2>&1
|
||||
then
|
||||
TIME_LIMIT_SECONDS=20
|
||||
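# POSIX-portable way to get the current epoch time: the second srand() returns the seed set by the first call, which is the current timestamp.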
START_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
while true
|
||||
do
|
||||
CURRENT_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
if [ $((CURRENT_TIME-START_TIME)) -gt $TIME_LIMIT_SECONDS ]; then
|
||||
echo "Error downloading docker-compose. Could not download it in $TIME_LIMIT_SECONDS seconds"
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
exit 1
|
||||
fi
|
||||
STATUS_RECEIVED=$(curl --retry 5 --retry-max-time 40 --write-out "%{http_code}\n" -L "https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose)
|
||||
CURL_EXIT_CODE=$?
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
echo "Error downloading docker-compose. curl failed with exit code $CURL_EXIT_CODE. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
if [ "${STATUS_RECEIVED}" -ne "200" ]; then
|
||||
echo "Error downloading docker-compose. Received HTTP status code $STATUS_RECEIVED. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
echo "Success downloading docker-compose"
|
||||
chmod 755 /usr/local/bin/docker-compose
|
||||
break
|
||||
done
|
||||
|
||||
# Create a symbolic link to docker-compose in the Docker CLI plugins directory
|
||||
# so 'docker compose' can also be used
|
||||
mkdir -p /usr/local/lib/docker/cli-plugins
|
||||
ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
echo "Docker Compose already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
# Restart Docker and wait for it to start
|
||||
systemctl enable docker
|
||||
systemctl stop docker
|
||||
systemctl start docker
|
||||
wait_for_docker
|
||||
|
||||
# Create random temp directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
docker pull "${INSTALLER_IMAGE}"
|
||||
|
||||
# Generate installation scripts
|
||||
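# The installer container writes the generated installation scripts to ${TMP_DIR} through the /output volume. Host networking is used, and any extra arguments to this script are forwarded to the installer.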
COMMON_DOCKER_OPTIONS="--network=host -v ${TMP_DIR}:/output \
|
||||
-e OPENVIDU_VERSION=$OPENVIDU_VERSION \
|
||||
-e CADDY_SERVER_IMAGE=$CADDY_SERVER_IMAGE \
|
||||
-e CADDY_SERVER_PRO_IMAGE=$CADDY_SERVER_PRO_IMAGE \
|
||||
-e MINIO_SERVER_IMAGE=$MINIO_SERVER_IMAGE \
|
||||
-e MINIO_CLIENT_IMAGE=$MINIO_CLIENT_IMAGE \
|
||||
-e MONGO_SERVER_IMAGE=$MONGO_SERVER_IMAGE \
|
||||
-e REDIS_SERVER_IMAGE=$REDIS_SERVER_IMAGE \
|
||||
-e BUSYBOX_IMAGE=$BUSYBOX_IMAGE \
|
||||
-e OPENVIDU_OPERATOR_IMAGE=$OPENVIDU_OPERATOR_IMAGE \
|
||||
-e OPENVIDU_SERVER_PRO_IMAGE=$OPENVIDU_SERVER_PRO_IMAGE \
|
||||
-e OPENVIDU_SERVER_IMAGE=$OPENVIDU_SERVER_IMAGE \
|
||||
-e OPENVIDU_CALL_SERVER_IMAGE=$OPENVIDU_CALL_SERVER_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_PRO_IMAGE=$OPENVIDU_DASHBOARD_PRO_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_IMAGE=$OPENVIDU_DASHBOARD_IMAGE \
|
||||
-e OPENVIDU_V2COMPATIBILITY_IMAGE=$OPENVIDU_V2COMPATIBILITY_IMAGE \
|
||||
-e LIVEKIT_INGRESS_SERVER_IMAGE=$LIVEKIT_INGRESS_SERVER_IMAGE \
|
||||
-e LIVEKIT_EGRESS_SERVER_IMAGE=$LIVEKIT_EGRESS_SERVER_IMAGE \
|
||||
-e PROMETHEUS_IMAGE=$PROMETHEUS_IMAGE \
|
||||
-e PROMTAIL_IMAGE=$PROMTAIL_IMAGE \
|
||||
-e LOKI_IMAGE=$LOKI_IMAGE \
|
||||
-e MIMIR_IMAGE=$MIMIR_IMAGE \
|
||||
-e GRAFANA_IMAGE=$GRAFANA_IMAGE \
|
||||
${INSTALLER_IMAGE} \
|
||||
--deployment-type=elastic \
|
||||
--node-role=media-node \
|
||||
--install \
|
||||
$*"
|
||||
|
||||
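# If '--no-tty' is passed, run the installer without a TTY (for automated, non-interactive runs); otherwise allocate a TTY so the installer can prompt for input.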
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
cd "$TMP_DIR/installation-scripts/openvidu/"
|
||||
chmod +x install_ov_media_node.sh
|
||||
./install_ov_media_node.sh
|
||||
|
||||
cat finish-message.txt
|
||||
|
||||
# Warn the user to verify that the private IP is set up correctly
|
||||
echo
|
||||
echo "ATTENTION!!! This is the private IP of this 'Media Node'. Make sure this IP is reachable from the 'Master Node'"
|
||||
cat private-ip.txt
|
||||
echo "If this is not your private IP, reinstall the 'Media Node' with the correct '--private-ip' parameter"
|
||||
echo
|
|
@ -0,0 +1,165 @@
|
|||
#!/bin/sh
|
||||
# Docker & Docker Compose will be installed on the machine if they are not already present
|
||||
set -eu
|
||||
export DOCKER_VERSION="${DOCKER_VERSION:-27.5.1}"
|
||||
export DOCKER_COMPOSE_VERSION="${DOCKER_COMPOSE_VERSION:-v2.32.4}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export INSTALLER_IMAGE="${INSTALLER_IMAGE:-docker.io/openvidu/openvidu-installer:${OPENVIDU_VERSION}}"
|
||||
export MINIO_SERVER_IMAGE="${MINIO_SERVER_IMAGE:-docker.io/bitnami/minio:2025.2.7-debian-12-r0}"
|
||||
export MINIO_CLIENT_IMAGE="${MINIO_CLIENT_IMAGE:-docker.io/minio/mc:RELEASE.2025-02-08T19-14-21Z}"
|
||||
export MONGO_SERVER_IMAGE="${MONGO_SERVER_IMAGE:-docker.io/mongo:8.0.4}"
|
||||
export REDIS_SERVER_IMAGE="${REDIS_SERVER_IMAGE:-docker.io/redis:7.4.2-alpine}"
|
||||
export BUSYBOX_IMAGE="${BUSYBOX_IMAGE:-docker.io/busybox:1.37.0}"
|
||||
export CADDY_SERVER_IMAGE="${CADDY_SERVER_IMAGE:-docker.io/openvidu/openvidu-caddy:${OPENVIDU_VERSION}}"
|
||||
export CADDY_SERVER_PRO_IMAGE="${CADDY_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-caddy:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_OPERATOR_IMAGE="${OPENVIDU_OPERATOR_IMAGE:-docker.io/openvidu/openvidu-operator:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_PRO_IMAGE="${OPENVIDU_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-server-pro:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_IMAGE="${OPENVIDU_SERVER_IMAGE:-docker.io/openvidu/openvidu-server:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_CALL_SERVER_IMAGE="${OPENVIDU_CALL_SERVER_IMAGE:-docker.io/openvidu/openvidu-call:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_PRO_IMAGE="${OPENVIDU_DASHBOARD_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_IMAGE="${OPENVIDU_DASHBOARD_IMAGE:-docker.io/openvidu/openvidu-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_V2COMPATIBILITY_IMAGE="${OPENVIDU_V2COMPATIBILITY_IMAGE:-docker.io/openvidu/openvidu-v2compatibility:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_INGRESS_SERVER_IMAGE="${LIVEKIT_INGRESS_SERVER_IMAGE:-docker.io/openvidu/ingress:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_EGRESS_SERVER_IMAGE="${LIVEKIT_EGRESS_SERVER_IMAGE:-docker.io/livekit/egress:v1.9.0}"
|
||||
export PROMETHEUS_IMAGE="${PROMETHEUS_IMAGE:-docker.io/prom/prometheus:v3.1.0}"
|
||||
export PROMTAIL_IMAGE="${PROMTAIL_IMAGE:-docker.io/grafana/promtail:3.3.2}"
|
||||
export LOKI_IMAGE="${LOKI_IMAGE:-docker.io/grafana/loki:3.3.2}"
|
||||
export MIMIR_IMAGE="${MIMIR_IMAGE:-docker.io/bitnami/grafana-mimir:2.15.0}"
|
||||
export GRAFANA_IMAGE="${GRAFANA_IMAGE:-docker.io/grafana/grafana:11.5.1}"
|
||||
|
||||
wait_for_docker() {
|
||||
echo "Waiting for Docker to start..."
|
||||
|
||||
# Set a countdown (in seconds)
|
||||
COUNTDOWN=60
|
||||
|
||||
while [ "$COUNTDOWN" -gt 0 ]; do
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "Docker started successfully."
|
||||
break
|
||||
else
|
||||
# Reduce the countdown by 1 each iteration.
|
||||
COUNTDOWN=$(( COUNTDOWN - 1 ))
|
||||
|
||||
if [ "$COUNTDOWN" -eq 0 ]; then
|
||||
echo "ERROR: Docker did not start within the allocated time."
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker > /dev/null 2>&1
|
||||
then
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh --version "${DOCKER_VERSION}" || { echo "Can't install Docker automatically. Install it manually and run this script again"; exit 1; }
|
||||
else
|
||||
echo "Docker already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose > /dev/null 2>&1
|
||||
then
|
||||
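# Download the docker-compose binary, retrying on curl failures and non-200 responses for up to TIME_LIMIT_SECONDS.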
TIME_LIMIT_SECONDS=20
|
||||
START_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
while true
|
||||
do
|
||||
CURRENT_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
if [ $((CURRENT_TIME-START_TIME)) -gt $TIME_LIMIT_SECONDS ]; then
|
||||
echo "Error downloading docker-compose. Could not download it in $TIME_LIMIT_SECONDS seconds"
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
exit 1
|
||||
fi
|
||||
STATUS_RECEIVED=$(curl --retry 5 --retry-max-time 40 --write-out "%{http_code}\n" -L "https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose)
|
||||
CURL_EXIT_CODE=$?
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
echo "Error downloading docker-compose. curl failed with exit code $CURL_EXIT_CODE. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
if [ "${STATUS_RECEIVED}" -ne "200" ]; then
|
||||
echo "Error downloading docker-compose. Received HTTP status code $STATUS_RECEIVED. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
echo "Success downloading docker-compose"
|
||||
chmod 755 /usr/local/bin/docker-compose
|
||||
break
|
||||
done
|
||||
|
||||
# Create a symbolic link to docker-compose in the Docker CLI plugins directory
|
||||
# so 'docker compose' can also be used
|
||||
mkdir -p /usr/local/lib/docker/cli-plugins
|
||||
ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
echo "Docker Compose already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
# Restart Docker and wait for it to start
|
||||
systemctl enable docker
|
||||
systemctl stop docker
|
||||
systemctl start docker
|
||||
wait_for_docker
|
||||
|
||||
# Create random temp directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
docker pull "${INSTALLER_IMAGE}"
|
||||
|
||||
# Generate installation scripts
|
||||
COMMON_DOCKER_OPTIONS="--network=host -v ${TMP_DIR}:/output \
|
||||
-e OPENVIDU_VERSION=$OPENVIDU_VERSION \
|
||||
-e CADDY_SERVER_IMAGE=$CADDY_SERVER_IMAGE \
|
||||
-e CADDY_SERVER_PRO_IMAGE=$CADDY_SERVER_PRO_IMAGE \
|
||||
-e MINIO_SERVER_IMAGE=$MINIO_SERVER_IMAGE \
|
||||
-e MINIO_CLIENT_IMAGE=$MINIO_CLIENT_IMAGE \
|
||||
-e MONGO_SERVER_IMAGE=$MONGO_SERVER_IMAGE \
|
||||
-e REDIS_SERVER_IMAGE=$REDIS_SERVER_IMAGE \
|
||||
-e BUSYBOX_IMAGE=$BUSYBOX_IMAGE \
|
||||
-e OPENVIDU_OPERATOR_IMAGE=$OPENVIDU_OPERATOR_IMAGE \
|
||||
-e OPENVIDU_SERVER_PRO_IMAGE=$OPENVIDU_SERVER_PRO_IMAGE \
|
||||
-e OPENVIDU_SERVER_IMAGE=$OPENVIDU_SERVER_IMAGE \
|
||||
-e OPENVIDU_CALL_SERVER_IMAGE=$OPENVIDU_CALL_SERVER_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_PRO_IMAGE=$OPENVIDU_DASHBOARD_PRO_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_IMAGE=$OPENVIDU_DASHBOARD_IMAGE \
|
||||
-e OPENVIDU_V2COMPATIBILITY_IMAGE=$OPENVIDU_V2COMPATIBILITY_IMAGE \
|
||||
-e LIVEKIT_INGRESS_SERVER_IMAGE=$LIVEKIT_INGRESS_SERVER_IMAGE \
|
||||
-e LIVEKIT_EGRESS_SERVER_IMAGE=$LIVEKIT_EGRESS_SERVER_IMAGE \
|
||||
-e PROMETHEUS_IMAGE=$PROMETHEUS_IMAGE \
|
||||
-e PROMTAIL_IMAGE=$PROMTAIL_IMAGE \
|
||||
-e LOKI_IMAGE=$LOKI_IMAGE \
|
||||
-e MIMIR_IMAGE=$MIMIR_IMAGE \
|
||||
-e GRAFANA_IMAGE=$GRAFANA_IMAGE \
|
||||
${INSTALLER_IMAGE} \
|
||||
--deployment-type=ha \
|
||||
--node-role=master-node \
|
||||
--install \
|
||||
$*"
|
||||
|
||||
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
cd "$TMP_DIR/installation-scripts/openvidu/"
|
||||
chmod +x install_ov_master_node.sh
|
||||
./install_ov_master_node.sh
|
||||
|
||||
cat finish-message.txt
|
|
@ -0,0 +1,173 @@
|
|||
#!/bin/sh
|
||||
# Docker & Docker Compose will be installed on the machine if they are not already present
|
||||
set -eu
|
||||
export DOCKER_VERSION="${DOCKER_VERSION:-27.5.1}"
|
||||
export DOCKER_COMPOSE_VERSION="${DOCKER_COMPOSE_VERSION:-v2.32.4}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export INSTALLER_IMAGE="${INSTALLER_IMAGE:-docker.io/openvidu/openvidu-installer:${OPENVIDU_VERSION}}"
|
||||
export MINIO_SERVER_IMAGE="${MINIO_SERVER_IMAGE:-docker.io/bitnami/minio:2025.2.7-debian-12-r0}"
|
||||
export MINIO_CLIENT_IMAGE="${MINIO_CLIENT_IMAGE:-docker.io/minio/mc:RELEASE.2025-02-08T19-14-21Z}"
|
||||
export MONGO_SERVER_IMAGE="${MONGO_SERVER_IMAGE:-docker.io/mongo:8.0.4}"
|
||||
export REDIS_SERVER_IMAGE="${REDIS_SERVER_IMAGE:-docker.io/redis:7.4.2-alpine}"
|
||||
export BUSYBOX_IMAGE="${BUSYBOX_IMAGE:-docker.io/busybox:1.37.0}"
|
||||
export CADDY_SERVER_IMAGE="${CADDY_SERVER_IMAGE:-docker.io/openvidu/openvidu-caddy:${OPENVIDU_VERSION}}"
|
||||
export CADDY_SERVER_PRO_IMAGE="${CADDY_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-caddy:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_OPERATOR_IMAGE="${OPENVIDU_OPERATOR_IMAGE:-docker.io/openvidu/openvidu-operator:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_PRO_IMAGE="${OPENVIDU_SERVER_PRO_IMAGE:-docker.io/openvidu/openvidu-server-pro:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_SERVER_IMAGE="${OPENVIDU_SERVER_IMAGE:-docker.io/openvidu/openvidu-server:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_CALL_SERVER_IMAGE="${OPENVIDU_CALL_SERVER_IMAGE:-docker.io/openvidu/openvidu-call:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_PRO_IMAGE="${OPENVIDU_DASHBOARD_PRO_IMAGE:-docker.io/openvidu/openvidu-pro-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_DASHBOARD_IMAGE="${OPENVIDU_DASHBOARD_IMAGE:-docker.io/openvidu/openvidu-dashboard:${OPENVIDU_VERSION}}"
|
||||
export OPENVIDU_V2COMPATIBILITY_IMAGE="${OPENVIDU_V2COMPATIBILITY_IMAGE:-docker.io/openvidu/openvidu-v2compatibility:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_INGRESS_SERVER_IMAGE="${LIVEKIT_INGRESS_SERVER_IMAGE:-docker.io/openvidu/ingress:${OPENVIDU_VERSION}}"
|
||||
export LIVEKIT_EGRESS_SERVER_IMAGE="${LIVEKIT_EGRESS_SERVER_IMAGE:-docker.io/livekit/egress:v1.9.0}"
|
||||
export PROMETHEUS_IMAGE="${PROMETHEUS_IMAGE:-docker.io/prom/prometheus:v3.1.0}"
|
||||
export PROMTAIL_IMAGE="${PROMTAIL_IMAGE:-docker.io/grafana/promtail:3.3.2}"
|
||||
export LOKI_IMAGE="${LOKI_IMAGE:-docker.io/grafana/loki:3.3.2}"
|
||||
export MIMIR_IMAGE="${MIMIR_IMAGE:-docker.io/bitnami/grafana-mimir:2.15.0}"
|
||||
export GRAFANA_IMAGE="${GRAFANA_IMAGE:-docker.io/grafana/grafana:11.5.1}"
|
||||
|
||||
wait_for_docker() {
|
||||
echo "Waiting for Docker to start..."
|
||||
|
||||
# Set a countdown (in seconds)
|
||||
COUNTDOWN=60
|
||||
|
||||
while [ "$COUNTDOWN" -gt 0 ]; do
|
||||
if docker info >/dev/null 2>&1; then
|
||||
echo "Docker started successfully."
|
||||
break
|
||||
else
|
||||
# Reduce the countdown by 1 each iteration.
|
||||
COUNTDOWN=$(( COUNTDOWN - 1 ))
|
||||
|
||||
if [ "$COUNTDOWN" -eq 0 ]; then
|
||||
echo "ERROR: Docker did not start within the allocated time."
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v docker > /dev/null 2>&1
|
||||
then
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh --version "${DOCKER_VERSION}" || { echo "Can't install Docker automatically. Install it manually and run this script again"; exit 1; }
|
||||
else
|
||||
echo "Docker already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
if ! command -v docker-compose > /dev/null 2>&1
|
||||
then
|
||||
TIME_LIMIT_SECONDS=20
|
||||
START_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
while true
|
||||
do
|
||||
CURRENT_TIME=$(awk 'BEGIN{srand(); print srand()}')
|
||||
if [ $((CURRENT_TIME-START_TIME)) -gt $TIME_LIMIT_SECONDS ]; then
|
||||
echo "Error downloading docker-compose. Could not download it in $TIME_LIMIT_SECONDS seconds"
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
exit 1
|
||||
fi
|
||||
STATUS_RECEIVED=$(curl --retry 5 --retry-max-time 40 --write-out "%{http_code}\n" -L "https://github.com/docker/compose/releases/download/$DOCKER_COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose)
|
||||
CURL_EXIT_CODE=$?
|
||||
if [ $CURL_EXIT_CODE -ne 0 ]; then
|
||||
echo "Error downloading docker-compose. curl failed with exit code $CURL_EXIT_CODE. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
if [ "${STATUS_RECEIVED}" -ne "200" ]; then
|
||||
echo "Error downloading docker-compose. Received HTTP status code $STATUS_RECEIVED. There are still $((TIME_LIMIT_SECONDS - (CURRENT_TIME - START_TIME))) seconds left to retry..."
|
||||
rm -rf /usr/local/bin/docker-compose
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
echo "Success downloading docker-compose"
|
||||
chmod 755 /usr/local/bin/docker-compose
|
||||
break
|
||||
done
|
||||
|
||||
# Create a symbolic link to docker-compose in the Docker CLI plugins directory
|
||||
# so 'docker compose' can also be used
|
||||
mkdir -p /usr/local/lib/docker/cli-plugins
|
||||
ln -s /usr/local/bin/docker-compose /usr/local/lib/docker/cli-plugins/docker-compose
|
||||
else
|
||||
echo "Docker Compose already installed. Check you have the latest version for best compatibility"
|
||||
fi
|
||||
|
||||
# Restart Docker and wait for it to start
|
||||
systemctl enable docker
|
||||
systemctl stop docker
|
||||
systemctl start docker
|
||||
wait_for_docker
|
||||
|
||||
# Create random temp directory
|
||||
TMP_DIR=$(mktemp -d)
|
||||
docker pull "${INSTALLER_IMAGE}"
|
||||
|
||||
# Generate installation scripts
|
||||
COMMON_DOCKER_OPTIONS="--network=host -v ${TMP_DIR}:/output \
|
||||
-e OPENVIDU_VERSION=$OPENVIDU_VERSION \
|
||||
-e CADDY_SERVER_IMAGE=$CADDY_SERVER_IMAGE \
|
||||
-e CADDY_SERVER_PRO_IMAGE=$CADDY_SERVER_PRO_IMAGE \
|
||||
-e MINIO_SERVER_IMAGE=$MINIO_SERVER_IMAGE \
|
||||
-e MINIO_CLIENT_IMAGE=$MINIO_CLIENT_IMAGE \
|
||||
-e MONGO_SERVER_IMAGE=$MONGO_SERVER_IMAGE \
|
||||
-e REDIS_SERVER_IMAGE=$REDIS_SERVER_IMAGE \
|
||||
-e BUSYBOX_IMAGE=$BUSYBOX_IMAGE \
|
||||
-e OPENVIDU_OPERATOR_IMAGE=$OPENVIDU_OPERATOR_IMAGE \
|
||||
-e OPENVIDU_SERVER_PRO_IMAGE=$OPENVIDU_SERVER_PRO_IMAGE \
|
||||
-e OPENVIDU_SERVER_IMAGE=$OPENVIDU_SERVER_IMAGE \
|
||||
-e OPENVIDU_CALL_SERVER_IMAGE=$OPENVIDU_CALL_SERVER_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_PRO_IMAGE=$OPENVIDU_DASHBOARD_PRO_IMAGE \
|
||||
-e OPENVIDU_DASHBOARD_IMAGE=$OPENVIDU_DASHBOARD_IMAGE \
|
||||
-e OPENVIDU_V2COMPATIBILITY_IMAGE=$OPENVIDU_V2COMPATIBILITY_IMAGE \
|
||||
-e LIVEKIT_INGRESS_SERVER_IMAGE=$LIVEKIT_INGRESS_SERVER_IMAGE \
|
||||
-e LIVEKIT_EGRESS_SERVER_IMAGE=$LIVEKIT_EGRESS_SERVER_IMAGE \
|
||||
-e PROMETHEUS_IMAGE=$PROMETHEUS_IMAGE \
|
||||
-e PROMTAIL_IMAGE=$PROMTAIL_IMAGE \
|
||||
-e LOKI_IMAGE=$LOKI_IMAGE \
|
||||
-e MIMIR_IMAGE=$MIMIR_IMAGE \
|
||||
-e GRAFANA_IMAGE=$GRAFANA_IMAGE \
|
||||
${INSTALLER_IMAGE} \
|
||||
--deployment-type=ha \
|
||||
--node-role=media-node \
|
||||
--install \
|
||||
$*"
|
||||
|
||||
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
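# The installer container has written the generated scripts into the temp dir; run the Media Node installer it produced.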
cd "$TMP_DIR/installation-scripts/openvidu/"
|
||||
chmod +x install_ov_media_node.sh
|
||||
./install_ov_media_node.sh
|
||||
|
||||
cat finish-message.txt
|
||||
|
||||
# Warn the user to verify that the private IP is set up correctly
|
||||
echo
|
||||
echo "ATTENTION!!! This is the private IP of this 'Media Node'. Make sure this IP is reachable from all the 'Master Node'"
|
||||
cat private-ip.txt
|
||||
echo "If this is not your private IP, reinstall the 'Media Node' with the correct '--private-ip' parameter"
|
||||
|
||||
echo
|
|
@ -0,0 +1,166 @@
|
|||
<#
|
||||
.DESCRIPTION
|
||||
A runbook that gracefully scales in the Media Nodes of an OpenVidu deployment
|
||||
|
||||
.NOTES
|
||||
AUTHOR: Sergio Fernández Gómez
|
||||
LAST EDIT: March 24, 2025
|
||||
#>
|
||||
param
|
||||
(
|
||||
[Parameter (Mandatory=$false)]
|
||||
[object] $WebhookData
|
||||
)
|
||||
$ErrorActionPreference = "stop"
|
||||
|
||||
if (!($WebhookData)) {
|
||||
Write-Error "This runbook is meant to be started from an Azure alert webhook only."
|
||||
exit
|
||||
}
|
||||
|
||||
# Get the data object from WebhookData
|
||||
$WebhookBody = (ConvertFrom-Json -InputObject $WebhookData.RequestBody)
|
||||
# Get the info needed to identify the VM (depends on the payload schema)
|
||||
$schemaId = $WebhookBody.schemaId
|
||||
|
||||
# Check if the schemaId is the one we can manage
|
||||
if (!($schemaId -eq "Microsoft.Insights/activityLogs")) {
|
||||
Write-Error "The alert data schema - $schemaId - is not supported."
|
||||
exit 1
|
||||
}
|
||||
|
||||
# This is the Activity Log Alert schema
|
||||
$AlertContext = [object] (($WebhookBody.data).context).activityLog
|
||||
$ResourceGroupName = $AlertContext.resourceGroupName
|
||||
$ResourceType = $AlertContext.resourceType
|
||||
$SubscriptionId = $AlertContext.subscriptionId
|
||||
$ResourceName = (($AlertContext.resourceId).Split("/"))[-1]
|
||||
$status = ($WebhookBody.data).status
|
||||
|
||||
# Exit the runbook if the alert status is not 'Activated'
|
||||
if (!($status -eq "Activated")) {
|
||||
Write-Error "No action taken. Alert status: $status"
|
||||
exit 1
|
||||
}
|
||||
# Determine code path depending on the resourceType
|
||||
if (!($ResourceType -eq "Microsoft.Compute/virtualMachineScaleSets")) {
|
||||
Write-Error "$ResourceType is not a supported resource type for this runbook."
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Ensures you do not inherit an AzContext in your runbook
|
||||
Disable-AzContextAutosave -Scope Process
|
||||
|
||||
# Log in to Azure
|
||||
try {
|
||||
# Connect to Azure with system-assigned managed identity
|
||||
$AzureContext = (Connect-AzAccount -Identity).context
|
||||
# set and store context
|
||||
$AzureContext = Set-AzContext -SubscriptionName $AzureContext.Subscription -DefaultProfile $AzureContext
|
||||
}
|
||||
catch {
|
||||
Write-Error -Message $_.Exception
|
||||
throw $_.Exception
|
||||
}
|
||||
|
||||
#################################################################################################################
|
||||
# At this point the runbook is authenticated to Azure; nothing else has been done yet
|
||||
#################################################################################################################
|
||||
|
||||
|
||||
######################################## LOCK ##########################################
|
||||
Import-Module Az.Storage
|
||||
$VMSS = Get-AzVmss -ResourceGroupName $ResourceGroupName -VMScaleSetName $ResourceName
|
||||
$StorageAccountName = $VMSS.Tags["storageAccount"]
|
||||
$StorageAccountKey = (Get-AzStorageAccountKey -ResourceGroupName $ResourceGroupName -StorageAccountName $StorageAccountName)[0].Value
|
||||
$Context = New-AzStorageContext -StorageAccountName $StorageAccountName -StorageAccountKey $StorageAccountKey
|
||||
#$blob = Get-AzureStorageBlob -Context $storageContext -Container $ContainerName -Blob $BlobName -ErrorAction Stop
|
||||
#$leaseStatus = $blob.ICloudBlob.Properties.LeaseStatus;
|
||||
#If($leaseStatus -eq "Locked")
|
||||
#{
|
||||
# $blob.ICloudBlob.BreakLease()
|
||||
# Write-Host "Successfully broken lease on '$BlobName' blob."
|
||||
#}
|
||||
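# Use a lease on the 'lock.txt' blob in the 'automation-locks' container as a distributed lock, so only one runbook execution scales in at a time.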
$Lease = az storage blob lease acquire -b "lock.txt" -c "automation-locks" --account-name $StorageAccountName --account-key $StorageAccountKey
|
||||
|
||||
if (-not $Lease) {
|
||||
Write-Output "Lock is already held. Exiting."
|
||||
exit 0
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
######################################## CHECKS ##########################################
|
||||
|
||||
#Get the timestamp of the event that triggered the runbook
|
||||
$EventTimestamp = $WebhookBody.data.context.activityLog.eventTimestamp
|
||||
|
||||
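# Ignore alerts raised before the last instance was deleted, using the 'InstanceDeleteTime' tag of the scale set as the reference.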
$DateTag = [datetime]$VMSS.Tags["InstanceDeleteTime"]
|
||||
$DateEventTimestamp = [datetime]$EventTimeStamp
|
||||
|
||||
"Checking if the event was launched before the last instance was deleted"
|
||||
if ($DateEventTimestamp -lt $DateTag) {
|
||||
Write-Output "The event was launched before the last instance was deleted. Exiting..."
|
||||
exit 1
|
||||
}
|
||||
"Done checking"
|
||||
|
||||
|
||||
# Get the instances in the VMSS; after verifying that no instance is still executing a Run Command, the instance at index 0 will receive the Run Command
|
||||
$InstancesInVMSS = Get-AzVmssVM -ResourceGroupName $ResourceGroupName -VMScaleSetName $ResourceName
|
||||
$InstanceCount = $InstancesInVMSS.Count
|
||||
|
||||
"Checking if theres more than 1 instance in the VMSS"
|
||||
if ($InstanceCount -le 1) {
|
||||
"There is only one instance in the VMSS. Exiting..."
|
||||
exit 1 # Exit the script if there is only one instance
|
||||
}
|
||||
|
||||
|
||||
# Check the tags in the VMSS to see if there is a tag with value TERMINATING
|
||||
"Checking TAG for TERMINATING"
|
||||
if($VMSS.Tags.Values -contains "TERMINATING"){
|
||||
"Found 'TERMINATING' tag so this runbook will not execute."
|
||||
exit 1
|
||||
}
|
||||
|
||||
######################################## MODIFYING ##########################################
|
||||
|
||||
$VMSS.Tags["STATUS"] = "TERMINATING"
|
||||
"Terminating not found changing TAG"
|
||||
Set-AzResource -ResourceId $VMSS.Id -Tag $VMSS.Tags -Force
|
||||
"TAG updated"
|
||||
|
||||
# Select the VM with instance ID 0 as the instance that will be gracefully terminated
|
||||
$InstanceId = $InstancesInVMSS[0].InstanceId
|
||||
|
||||
"Checking if one Run Command is executing"
|
||||
|
||||
# Iterate through each instance and check if RunCommand is still running
|
||||
foreach ($Instance in $InstancesInVMSS) {
|
||||
$runCommandStatus = Get-AzVmssVMRunCommand -ResourceGroupName $ResourceGroupName -VMScaleSetName $ResourceName -InstanceId $Instance.InstanceId
|
||||
|
||||
# Check if the RunCommand is still running
|
||||
if ($runCommandStatus.ProvisioningState -eq "Running") {
|
||||
Write-Output "Instance $($Instance.InstanceId) is still running a command. Exiting..."
|
||||
exit 1 # Exit the script if any instance is still running the command
|
||||
}
|
||||
}
|
||||
"Done checking"
|
||||
|
||||
"Sending RunCommand"
|
||||
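# Invoke the Run Command through the ARM REST API to execute the graceful stop script on the selected instance.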
$Token = (Get-AzAccessToken).Token
|
||||
$Uri = "https://management.azure.com/subscriptions/$SubscriptionId/resourceGroups/$ResourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/$ResourceName/virtualMachines/$InstanceId/runCommand?api-version=2021-11-01"
|
||||
|
||||
$Body = @{
|
||||
commandId = 'RunShellScript'
|
||||
script = @('sudo /usr/local/bin/stop_media_node.sh')
|
||||
} | ConvertTo-Json -Depth 3
|
||||
|
||||
Invoke-RestMethod -Uri $Uri -Method POST -Headers @{ Authorization = "Bearer $Token" } -Body $Body -ContentType "application/json"
|
||||
"RunCommand sent"
|
||||
}
|
||||
finally
|
||||
{
|
||||
az storage blob lease release -b "lock.txt" -c "automation-locks" --account-name $StorageAccountName --account-key $StorageAccountKey --lease-id $Lease
|
||||
}
|
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
|
||||
"contentVersion": "1.0.0.0",
|
||||
"parameters": {
|
||||
"automationAccountName": {
|
||||
"type": "String",
|
||||
"metadata": {
|
||||
"description": "Automation account name"
|
||||
}
|
||||
},
|
||||
"webhookName": {
|
||||
"type": "String",
|
||||
"metadata": {
|
||||
"description": "Webhook Name"
|
||||
}
|
||||
},
|
||||
"runbookName": {
|
||||
"type": "String",
|
||||
"metadata": {
|
||||
"description": "Runbook Name for which webhook will be created"
|
||||
}
|
||||
},
|
||||
"WebhookExpiryTime": {
|
||||
"type": "String",
|
||||
"metadata": {
|
||||
"description": "Webhook Expiry time"
|
||||
}
|
||||
},
|
||||
"_artifactsLocation": {
|
||||
"defaultValue": "https://raw.githubusercontent.com/Piwccle/AzureScaleIn/refs/heads/main/scaleInRunbook.ps1",
|
||||
"type": "String",
|
||||
"metadata": {
|
||||
"description": "URI to artifacts location"
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"type": "Microsoft.Automation/automationAccounts",
|
||||
"apiVersion": "2020-01-13-preview",
|
||||
"name": "[parameters('automationAccountName')]",
|
||||
"location": "[resourceGroup().location]",
|
||||
"identity": {
|
||||
"type": "SystemAssigned"
|
||||
},
|
||||
"properties": {
|
||||
"sku": {
|
||||
"name": "Basic"
|
||||
}
|
||||
},
|
||||
"resources": [
|
||||
{
|
||||
"type": "runbooks",
|
||||
"apiVersion": "2018-06-30",
|
||||
"name": "[parameters('runbookName')]",
|
||||
"location": "[resourceGroup().location]",
|
||||
"dependsOn": [
|
||||
"[parameters('automationAccountName')]"
|
||||
],
|
||||
"properties": {
|
||||
"runbookType": "PowerShell72",
|
||||
"logProgress": "true",
|
||||
"description": "Scale In Runbook",
|
||||
"publishContentLink": {
|
||||
"uri": "[parameters('_artifactsLocation')]",
|
||||
"version": "1.0.0.0"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "webhooks",
|
||||
"apiVersion": "2018-06-30",
|
||||
"name": "[parameters('webhookName')]",
|
||||
"dependsOn": [
|
||||
"[parameters('automationAccountName')]",
|
||||
"[parameters('runbookName')]"
|
||||
],
|
||||
"properties": {
|
||||
"isEnabled": true,
|
||||
"expiryTime": "[parameters('WebhookExpiryTime')]",
|
||||
"runbook": {
|
||||
"name": "[parameters('runbookName')]"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "Microsoft.Authorization/roleAssignments",
|
||||
"apiVersion": "2022-04-01",
|
||||
"name": "[guid(format('roleAutomationContributorAssignmentAutomationAccount{0}', parameters('automationAccountName')))]",
|
||||
"properties": {
|
||||
"roleDefinitionId": "[subscriptionResourceId('Microsoft.Authorization/roleDefinitions', 'b24988ac-6180-42a0-ab88-20f7382dd24c')]",
|
||||
"principalId": "[reference(resourceId('Microsoft.Automation/automationAccounts', parameters('automationAccountName')), '2023-11-01', 'full').identity.principalId]",
|
||||
"principalType": "ServicePrincipal"
|
||||
},
|
||||
"dependsOn": [
|
||||
"[resourceId('Microsoft.Automation/automationAccounts', parameters('automationAccountName'))]"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": {
|
||||
"webhookUri": {
|
||||
"type": "String",
|
||||
"value": "[reference(parameters('webhookName')).uri]"
|
||||
},
|
||||
"automationAccountId": {
|
||||
"type": "string",
|
||||
"value": "[resourceId('Microsoft.Automation/automationAccounts', parameters('automationAccountName'))]"
|
||||
},
|
||||
"webhookId": {
|
||||
"type": "string",
|
||||
"value": "[resourceId('Microsoft.Automation/automationAccounts/webhooks', parameters('automationAccountName'), parameters('webhookName'))]"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
#!/bin/sh
|
||||
set -eu
|
||||
export INSTALL_PREFIX="${INSTALL_PREFIX:-/opt/openvidu}"
|
||||
export OPENVIDU_VERSION="${OPENVIDU_VERSION:-main}"
|
||||
export REGISTRY="${REGISTRY:-docker.io}"
|
||||
export UPDATER_IMAGE="${UPDATER_IMAGE:-${REGISTRY}/openvidu/openvidu-updater:${OPENVIDU_VERSION}}"
|
||||
|
||||
# Check if executing as root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if docker is installed
|
||||
if ! command -v docker > /dev/null 2>&1; then
|
||||
echo "Docker is not installed. Please install Docker and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if file /opt/openvidu/deployment-info.yaml exists
|
||||
if ! [ -f /opt/openvidu/deployment-info.yaml ]; then
|
||||
echo "OpenVidu is not installed. Please install OpenVidu and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Stop OpenVidu service
|
||||
echo "Stopping OpenVidu service..."
|
||||
systemctl stop openvidu
|
||||
|
||||
# Pull updater image
|
||||
docker pull "${UPDATER_IMAGE}"
|
||||
|
||||
# Temporary directory for post-update script
|
||||
TMP_DIR=$(mktemp -d)
|
||||
|
||||
# Run the updater (it may generate a post-update script in the temp dir)
|
||||
COMMON_DOCKER_OPTIONS="--network=host \
|
||||
-v ${INSTALL_PREFIX}:${INSTALL_PREFIX} \
|
||||
-v ${TMP_DIR}:${TMP_DIR} \
|
||||
${UPDATER_IMAGE} \
|
||||
--docker-registry=${REGISTRY} \
|
||||
--install-prefix=${INSTALL_PREFIX} \
|
||||
--post-update-script="${TMP_DIR}/post-update.sh" \
|
||||
$*"
|
||||
|
||||
INTERACTIVE_MODE=true
|
||||
for arg in "$@"; do
|
||||
if [ "$arg" = "--no-tty" ]; then
|
||||
INTERACTIVE_MODE=false;
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$INTERACTIVE_MODE" = true ]; then
|
||||
docker run -it ${COMMON_DOCKER_OPTIONS} > /dev/tty
|
||||
else
|
||||
docker run -i ${COMMON_DOCKER_OPTIONS}
|
||||
fi
|
||||
|
||||
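# If the updater generated a post-update script, make it executable and run it on the host.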
if [ -f "${TMP_DIR}/post-update.sh" ]; then
|
||||
chmod +x "${TMP_DIR}/post-update.sh"
|
||||
"${TMP_DIR}/post-update.sh"
|
||||
fi
|