Cloud Runner v2 (#310)
dist/bootstrapper/ApplyClusterAndAcquireLock.sh (vendored, deleted, 69 lines)
@@ -1,69 +0,0 @@
#!/bin/sh

# This creates a GKE Cluster
# - Will wait for any deletion to complete on a cluster with the same name before creating
# - Will wait for completion before continuing
# - If the script is run concurrently multiple times, only one cluster will be created; all instances will wait for availability
# Requires GCP Cloud SDK
# Installs retry https://github.com/kadwanev/retry (-s seconds between tries, -t max tries)

GKE_PROJECT=$1
GKE_CLUSTER=$2
GKE_ZONE=$3

# may update this to avoid repeated install, drop me a comment if needed
sudo sh -c "curl https://raw.githubusercontent.com/kadwanev/retry/master/retry -o /usr/local/bin/retry && chmod +x /usr/local/bin/retry"

attempts=0
while [ $attempts -le 1 ]
do
  # wait until no cluster with this name is still deleting
  retry -s 15 -t 20 -v '
    STATUS=$(gcloud container clusters list --format="json" --project $GKE_PROJECT |
      jq "
        .[] |
        {name: .name, status: .status} |
        select(.name == \"$GKE_CLUSTER\")
      " |
      jq ".status")
    if [ "$STATUS" = "\"STOPPING\"" ]; then echo "Cluster stopping, waiting for completion" && exit 1; fi
    exit 0
  '
  cluster=$(gcloud container clusters list --project $GKE_PROJECT --format="json" | jq ".[] | select(.name == \"$GKE_CLUSTER\")")

  if [ -z "$cluster" ];
  then
    echo "No clusters found for \"$GKE_CLUSTER\" in project \"$GKE_PROJECT\" in zone \"$GKE_ZONE\""
    # you may not need this, it installs the gcloud beta components for additional command line options
    gcloud components install beta -q
    # replace this line with whatever type of cluster you want to create
    gcloud beta container --project $GKE_PROJECT clusters create $GKE_CLUSTER --zone $GKE_ZONE --no-enable-basic-auth --cluster-version "1.15.12-gke.2" --machine-type "custom-1-3072" --image-type "COS" --disk-type "pd-standard" --disk-size "15" --metadata disable-legacy-endpoints=true --scopes "https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" --num-nodes "1" --enable-stackdriver-kubernetes --enable-ip-alias --default-max-pods-per-node "110" --enable-autoscaling --min-nodes "0" --max-nodes "3" --no-enable-master-authorized-networks --addons HorizontalPodAutoscaling,HttpLoadBalancing --enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0
  fi;
  # wait until the cluster is done provisioning
  retry -s 15 -t 20 -v '
    STATUS=$(gcloud container clusters list --format="json" --project $GKE_PROJECT |
      jq "
        .[] |
        {name: .name, status: .status} |
        select(.name == \"$GKE_CLUSTER\")
      " |
      jq ".status")
    if [ "$STATUS" = "\"PROVISIONING\"" ]; then echo "Cluster provisioning, waiting for availability" && exit 1; fi
    exit 0
  '
  echo "Cluster is available"
  gcloud container clusters get-credentials $GKE_CLUSTER --zone $GKE_ZONE --project $GKE_PROJECT
  kubectl version
  NSID=$(cat /proc/sys/kernel/random/uuid)
  # note: ::set-env has since been deprecated by GitHub Actions in favour of writing to $GITHUB_ENV
  echo "::set-env name=NSID::"$NSID
  {
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: ns-unity-builder-$NSID
  labels:
    app: unity-builder
EOF
  } && exit 0

  attempts=$(($attempts+1))
done
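For reference, a hypothetical invocation of this deleted bootstrapper (the project, cluster and zone values are invented; positional arguments map to GKE_PROJECT, GKE_CLUSTER and GKE_ZONE):

  sh dist/bootstrapper/ApplyClusterAndAcquireLock.sh my-gcp-project unity-builder-cluster us-central1-a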
@@ -1,13 +0,0 @@
kubectl delete ns ns-unity-builder-$NSID

# do any unity-builder namespaces remain?
namespaceCount=$(kubectl get ns --output json | jq ".items | .[] | select(.metadata.labels.app == \"unity-builder\") | select(.status.phase != \"Terminating\")" | jq -s "length")
echo $namespaceCount
if [ "$namespaceCount" != "0" ]
then
  echo "other builds still active, let the last one delete the cluster"
  exit 0
else
  echo "no active builds remain, deleting cluster"
  retry -s 15 -t 5 -v 'gcloud container clusters delete $GKE_CLUSTER --zone $GKE_ZONE --project $GKE_PROJECT --quiet'
fi
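A minimal sketch of what that namespace count computes, run against hand-written sample JSON (both namespace objects are invented for illustration; note that Kubernetes reports namespace phases as "Active" and "Terminating"):

  echo '{"items":[
    {"metadata":{"labels":{"app":"unity-builder"}},"status":{"phase":"Active"}},
    {"metadata":{"labels":{"app":"unity-builder"}},"status":{"phase":"Terminating"}}
  ]}' |
    jq '.items | .[] | select(.metadata.labels.app == "unity-builder") | select(.status.phase != "Terminating")' |
    jq -s 'length'
  # prints 1: one build is still active, so the cluster is left running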
dist/cloud-formations/base-setup.yml (vendored, 17 lines changed)
@@ -7,6 +7,9 @@ Parameters:
     Type: String
     Default: development
     Description: "Your deployment environment: DEV, QA, PROD"
+  Version:
+    Type: String
+    Description: "hash of template"

   # ContainerPort:
   #   Type: Number
@@ -231,10 +234,6 @@ Resources:
         Statement:
           - Effect: Allow
             Action:
-              # Allow upload to S3
-              - 's3:GetObject'
-              - 's3:GetObjectVersion'
-              - 's3:PutObject'

               # Allow the use of secret manager
               - 'secretsmanager:GetSecretValue'
@@ -250,7 +249,7 @@ Resources:
              - 'logs:CreateLogStream'
              - 'logs:PutLogEvents'
            Resource: '*'

  DeleteCFNLambdaExecutionRole:
    Type: "AWS::IAM::Role"
    Properties:
@@ -348,12 +347,6 @@ Resources:
       SecurityGroups:
         - !Ref EFSServerSecurityGroup

-  S3Bucket:
-    Type: 'AWS::S3::Bucket'
-    DeletionPolicy: Retain
-    Properties:
-      BucketName: game-ci-storage
@@ -388,7 +381,7 @@ Outputs:
    Value: !GetAtt 'ECSTaskExecutionRole.Arn'
    Export:
      Name: !Sub ${EnvironmentName}:ECSTaskExecutionRole

  DeleteCFNLambdaExecutionRole:
    Description: Lambda execution role for cleaning up cloud formations
    Value: !GetAtt 'DeleteCFNLambdaExecutionRole.Arn'
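Both role ARNs are exported under the environment name so that the per-build stacks below can consume them with Fn::ImportValue. A hypothetical way to confirm the exports once the base stack is deployed (assuming the default EnvironmentName of development):

  aws cloudformation list-exports \
    --query "Exports[?starts_with(Name, 'development:')].[Name, Value]" \
    --output table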
@@ -18,7 +18,7 @@ Parameters:
     Type: String
     Default: development
     Description: 'Your deployment environment: DEV, QA, PROD'
-  BUILDID:
+  BUILDGUID:
     Type: String
     Default: ''
   StackName:
@@ -34,16 +34,16 @@ Resources:
   DeleteCFNLambda:
     Type: "AWS::Lambda::Function"
     Properties:
-      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDID ] ]
+      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDGUID ] ]
       Code:
         ZipFile: |
           import boto3
           import os
           import json

           stack_name = os.environ['stackName']
           delete_stack_name = os.environ['deleteStackName']

           def delete_cfn(stack_name):
               try:
                   cfn = boto3.resource('cloudformation')
@@ -51,8 +51,8 @@ Resources:
                   stack.delete()
                   return "SUCCESS"
               except:
                   return "ERROR"

           def handler(event, context):
               print("Received event:")
               print(json.dumps(event))
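The inline handler ultimately just deletes a stack by name. A shell equivalent of that delete_cfn call, with an invented stack name, would be:

  aws cloudformation delete-stack --stack-name unity-builder-example-stack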
@@ -66,7 +66,7 @@ Resources:
      Handler: "index.handler"
      Runtime: "python3.6"
      Timeout: "5"
      Role:
        'Fn::ImportValue': !Sub '${EnvironmentName}:DeleteCFNLambdaExecutionRole'
  DeleteStackEventRule:
    DependsOn:
@@ -74,27 +74,27 @@ Resources:
       - GenerateCronExpression
     Type: "AWS::Events::Rule"
     Properties:
-      Name: !Join [ "", [ 'DeleteStackEventRule', !Ref BUILDID ] ]
+      Name: !Join [ "", [ 'DeleteStackEventRule', !Ref BUILDGUID ] ]
       Description: Delete stack event
       ScheduleExpression: !GetAtt GenerateCronExpression.cron_exp
       State: "ENABLED"
       Targets:
         -
           Arn: !GetAtt DeleteCFNLambda.Arn
           Id: 'DeleteCFNLambda'
   PermissionForDeleteCFNLambda:
     Type: "AWS::Lambda::Permission"
     DependsOn:
       - DeleteStackEventRule
     Properties:
-      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDID ] ]
+      FunctionName: !Join [ "", [ 'DeleteCFNLambda', !Ref BUILDGUID ] ]
       Action: "lambda:InvokeFunction"
       Principal: "events.amazonaws.com"
       SourceArn: !GetAtt DeleteStackEventRule.Arn
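To inspect the one-shot schedule a build registered, a hypothetical lookup by the rule-name prefix used above (each rule name ends in the build's GUID):

  aws events list-rules --name-prefix DeleteStackEventRule \
    --query "Rules[].[Name, ScheduleExpression, State]" --output table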
   GenerateCronExpLambda:
     Type: "AWS::Lambda::Function"
     Properties:
-      FunctionName: !Join [ "", [ 'GenerateCronExpressionLambda', !Ref BUILDID ] ]
+      FunctionName: !Join [ "", [ 'GenerateCronExpressionLambda', !Ref BUILDGUID ] ]
       Code:
         ZipFile: |
           from datetime import datetime, timedelta
@@ -102,7 +102,7 @@ Resources:
          import logging
          import json
          import cfnresponse

          def deletion_time(ttl):
              delete_at_time = datetime.now() + timedelta(minutes=int(ttl))
              hh = delete_at_time.hour
@@ -113,7 +113,7 @@ Resources:
              # minutes hours day month day-of-week year
              cron_exp = "cron({} {} {} {} ? {})".format(mm, hh, dd, month, yyyy)
              return cron_exp

          def handler(event, context):
              print('Received event: %s' % json.dumps(event))
              status = cfnresponse.SUCCESS
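As a worked example of the expression deletion_time builds: with ttl=80 and a current time of 14:05 UTC on 7 June 2024, the rule fires once at 15:25 that day, i.e. cron(25 15 7 6 ? 2024). A rough shell equivalent (assuming GNU date; the ttl value is invented):

  TTL=80
  date -u -d "+${TTL} minutes" +"cron(%-M %-H %-d %-m ? %Y)"
  # prints, e.g.: cron(25 15 7 6 ? 2024)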
@@ -132,12 +132,12 @@ Resources:
       Handler: "index.handler"
       Runtime: "python3.6"
       Timeout: "5"
       Role:
         'Fn::ImportValue': !Sub '${EnvironmentName}:DeleteCFNLambdaExecutionRole'
   GenerateCronExpression:
     Type: "Custom::GenerateCronExpression"
     Version: "1.0"
     Properties:
-      Name: !Join [ "", [ 'GenerateCronExpression', !Ref BUILDID ] ]
+      Name: !Join [ "", [ 'GenerateCronExpression', !Ref BUILDGUID ] ]
       ServiceToken: !GetAtt GenerateCronExpLambda.Arn
       ttl: !Ref 'TTL'
dist/cloud-formations/task-def-formation.yml (vendored, 6 lines changed)
@@ -30,7 +30,7 @@ Parameters:
     Type: Number
     Default: 2048
     Description: How much memory in megabytes to give the container
-  BUILDID:
+  BUILDGUID:
     Type: String
     Default: ''
   Command:
@@ -47,7 +47,7 @@ Parameters:
     Default: ''
     Description: >-
       (Optional) An IAM role to give the service's containers if the code within
-      needs to access other AWS resources like S3 buckets, DynamoDB tables, etc
+      needs to access other AWS resources
   EFSMountDirectory:
     Type: String
     Default: '/efsdata'
@@ -98,7 +98,7 @@ Resources:
    Metadata:
      'AWS::CloudFormation::Designer':
        id: c6f18447-b879-4696-8873-f981b2cedd2b

  # template secrets p2 - secret

  TaskDefinition:
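To illustrate how the renamed BUILDGUID parameter is consumed, a hypothetical launch of this task-definition stack (stack name and GUID invented; the remaining required parameters are omitted for brevity):

  BUILDGUID=$(cat /proc/sys/kernel/random/uuid)
  aws cloudformation create-stack \
    --stack-name unity-builder-$BUILDGUID \
    --template-body file://dist/cloud-formations/task-def-formation.yml \
    --parameters ParameterKey=BUILDGUID,ParameterValue=$BUILDGUID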
dist/index.js (generated, vendored, 253441 lines changed): diff suppressed because it is too large
dist/index.js.map (generated, vendored, 2 lines changed): diff suppressed because one or more lines are too long
dist/licenses.txt (generated, vendored, 1085 lines changed): diff suppressed because it is too large
dist/platforms/ubuntu/entrypoint.sh (vendored): Normal file → Executable file
dist/platforms/ubuntu/steps/activate.sh (vendored): Normal file → Executable file
dist/platforms/ubuntu/steps/build.sh (vendored): Normal file → Executable file
dist/platforms/ubuntu/steps/return_license.sh (vendored): Normal file → Executable file
dist/platforms/ubuntu/steps/set_gitcredential.sh (vendored): Normal file → Executable file