Skip to content

Commit

Permalink
[WIP] Create test pipeline
Browse files Browse the repository at this point in the history
Signed-off-by: Dimitris Karakasilis <[email protected]>
  • Loading branch information
jimmykarily committed Jan 17, 2025
1 parent 4c7eaa5 commit e32fe55
Show file tree
Hide file tree
Showing 4 changed files with 464 additions and 121 deletions.
46 changes: 46 additions & 0 deletions .github/release-space-from-ubuntu-runners.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
#!/bin/bash

# Free disk space on GitHub-hosted Ubuntu runners by removing large
# pre-installed toolchains and SDKs the build does not need.
# Every removal is best-effort (|| true): package sets differ between runner
# images and a missing package must not fail the job.

echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools || true
sudo apt-get purge --auto-remove android-sdk-platform-tools || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y monodoc-manual || true
sudo apt-get remove -y msbuild || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
sudo apt-get remove -y '^gfortran-.*' || true
# Fixed: these patterns previously used glob-style '-*' ("zero or more
# dashes" in a POSIX regex) instead of '-.*' like the other patterns above,
# and '+' is a regex metacharacter that must be escaped.
sudo apt-get remove -y '^gcc-.*' || true
sudo apt-get remove -y '^g\+\+-.*' || true
sudo apt-get remove -y '^cpp-.*' || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true

sudo rm -rf /usr/local/lib/android # will release about 10 GB if you don't need Android
sudo rm -rf /usr/share/dotnet      # will release about 20GB if you don't need .NET

df -h


235 changes: 235 additions & 0 deletions .github/upload-image-to-aws.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,235 @@
#!/bin/bash

# Given a raw image created with Auroraboot, this script will upload it to the specified AWS account as a public AMI.
# Docs:
# https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-ami-boot-mode.html
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/launch-instance-boot-mode.html
#
# Usage: upload-image-to-aws.sh <cloud-image.raw>
# Required environment variables: AWS_PROFILE, AWS_REGION, AWS_S3_BUCKET

# Abort on the first failing command, including failures inside pipelines.
set -e
set -o pipefail

# Validate the command-line arguments: exactly one existing raw-image file
# is expected. Prints a diagnostic and exits with status 1 on any failure.
checkArguments() {
  if [ "$#" -lt 1 ]; then
    echo "Error: You need to specify the cloud image to upload."
    echo "Usage: $0 <cloud-image>"
    exit 1
  fi

  local image="$1"

  # Guard clauses instead of nested ifs: bail out as soon as a check fails.
  if [ ! -f "$image" ]; then
    echo "Error: File '$image' does not exist."
    exit 1
  fi

  # A raw disk image starts with an MBR boot sector; file(1) detects that.
  file "$image" | grep -q 'DOS/MBR boot sector' || {
    echo "Error: File '$image' is not a raw image."
    exit 1
  }
}

# Ensure the AWS settings this script relies on are present and non-empty
# in the environment; abort with status 1 otherwise.
checkEnvVars() {
  local var
  for var in AWS_PROFILE AWS_REGION AWS_S3_BUCKET; do
    if [ -z "${!var}" ]; then
      echo "Error: AWS_PROFILE, AWS_REGION and AWS_S3_BUCKET environment variables must be set."
      exit 1
    fi
  done
}

# Thin wrapper around the aws CLI that pins --profile/--region from the
# environment (validated earlier by checkEnvVars).
# Fixed: the expansions are now quoted so values containing whitespace are
# passed as single arguments instead of being word-split.
AWS() {
  aws --profile "$AWS_PROFILE" --region "$AWS_REGION" "$@"
}

# https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html#vmimport-role
# Idempotently create the IAM "vmimport" service role required by AWS VM
# Import/Export (see URL above). If a role whose name contains "vmimport"
# already exists, nothing is changed.
ensureVmImportRole() {
(AWS iam list-roles | jq -r '.Roles[] | select(.RoleName | contains("vmimport")) | .RoleName' | grep -q "vmimport" && echo "vmimport role found. All good.") || {
echo "Creating vmimport role"

export AWS_PAGER="" # Avoid being dropped to a pager
# Trust policy: lets the VM Import/Export service (vmie.amazonaws.com)
# assume this role, gated by the well-known external id "vmimport".
# The JSON is fed through a process substitution so it needs no temp file.
AWS iam create-role --role-name vmimport --assume-role-policy-document file://<(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "vmie.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "vmimport"
}
}
}
]
}
EOF
)

# AWS iam attach-role-policy --role-name vmimport --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM

# Inline policy: the import service needs read/write access to the staging
# S3 bucket (note: $AWS_S3_BUCKET expands inside this unquoted heredoc)
# plus the EC2 snapshot/image operations used during import.
AWS iam put-role-policy --role-name vmimport --policy-name vmimport --policy-document file://<(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetBucketLocation",
"s3:GetBucketAcl",
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::$AWS_S3_BUCKET",
"arn:aws:s3:::$AWS_S3_BUCKET/*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:ModifySnapshotAttribute",
"ec2:CopySnapshot",
"ec2:RegisterImage",
"ec2:Describe*"
],
"Resource": "*"
}
]
}
EOF
)

sleep 10 # Wait for the policy and permissions to be effective. This is not ideal but I couldn't find any better way.
}
}

# Upload the image to the configured S3 bucket unless an object with the
# same basename already exists (later steps look the object up by basename).
uploadImageToS3() {
  local file="$1"
  # Declaration split from assignment so a basename failure isn't masked.
  local baseName
  baseName=$(basename "$file")
  # Fixed: the destination key is built from the basename. The old s3Path
  # used the full local path (a key that importAsSnapshot would never find)
  # and was never used; the cp line also left $1 unquoted.
  local s3Path="s3://$AWS_S3_BUCKET/$baseName"

  if AWS s3 ls "$AWS_S3_BUCKET/$baseName" > /dev/null 2>&1; then
    echo "File '$baseName' already exists in S3 bucket '$AWS_S3_BUCKET'."
  else
    echo "File '$baseName' does not exist in S3 bucket '$AWS_S3_BUCKET'. Uploading now."
    AWS s3 cp "$file" "$s3Path"
  fi
}

# Poll the given import-snapshot task until it finishes, then print the
# resulting snapshot ID. Callers capture the output and take the last line.
# Fixed: all progress/status messages now go to stderr (the original sent
# "completed"/"failed" lines to stdout, polluting the captured result), and
# the final `echo $( … )` wrapper — a useless, unquoted echo of a command
# substitution — is replaced by the command's own stdout.
# Exits 1 when the import is deleted or cancelled.
waitForSnapshotCompletion() {
  local taskID="$1"
  local status=""

  while true; do
    status=$(AWS ec2 describe-import-snapshot-tasks --import-task-ids "$taskID" --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.Status' --output text)

    if [ "$status" == "completed" ]; then
      echo "Snapshot import completed." >&2
      break
    elif [ "$status" == "deleted" ] || [ "$status" == "cancelling" ] || [ "$status" == "cancelled" ]; then
      echo "Snapshot import failed with status: $status" >&2
      exit 1
    else
      echo "Waiting for snapshot import to complete. Current status: $status" >&2
      sleep 30
    fi
  done

  # The snapshot ID is the function's sole stdout output.
  AWS ec2 describe-import-snapshot-tasks --import-task-ids "$taskID" --query 'ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId' --output text
}

# Import the uploaded raw image (S3 key = $1) as an EBS snapshot and tag it
# with its source file so re-runs are idempotent. Progress is printed to
# stdout; the snapshot ID is the LAST line of output, so callers retrieve it
# with `tail -1`.
importAsSnapshot() {
  local file="$1"
  local snapshotID
  local taskID

  # Re-use an existing snapshot if one is already tagged with this file.
  # describe-snapshots prints the literal string "None" when nothing matches.
  snapshotID=$(AWS ec2 describe-snapshots --filters "Name=tag:SourceFile,Values=$file" --query "Snapshots[0].SnapshotId" --output text)
  if [ "$snapshotID" != "None" ]; then
    echo "Snapshot $snapshotID already exists for file $file"
    echo "$snapshotID"
    return 0
  fi

  # The disk-container JSON goes through a process substitution so the
  # bucket and key variables expand without a temp file.
  # Fixed: failure handling uses `|| { … }` — the old `if [ $? -ne 0 ]`
  # after the assignment was dead code under `set -e`.
  taskID=$(AWS ec2 import-snapshot --description "$file" --disk-container file://<(cat <<EOF
{
  "Description": "$file",
  "Format": "RAW",
  "UserBucket": {
    "S3Bucket": "$AWS_S3_BUCKET",
    "S3Key": "$file"
  }
}
EOF
  ) --query 'ImportTaskId' --output text) || {
    echo "Failed to import snapshot"
    return 1
  }

  echo "Snapshot import task started with ID: $taskID"

  # Fixed: `| tee /dev/tty` was removed — /dev/tty does not exist on CI
  # runners and made this pipeline fail there. Progress is emitted on
  # stderr by waitForSnapshotCompletion instead.
  snapshotID=$(waitForSnapshotCompletion "$taskID" | tail -1)
  echo "Adding tag to the snapshot with ID: $snapshotID"
  AWS ec2 create-tags --resources "$snapshotID" --tags "Key=SourceFile,Value=$file"

  echo "$snapshotID" # Return the snapshot ID so that we can grab it with `tail -1`
}

# Register an AMI named $1 from snapshot $2, unless an image with that name
# already exists. Prints what happened either way.
checkImageExistsOrCreate() {
  local name="$1"
  local snapshot="$2"
  local id

  # Look the image up by name; AWS prints the literal string "None"
  # when there is no match.
  id=$(AWS ec2 describe-images --filters "Name=name,Values=$name" --query 'Images[0].ImageId' --output text)

  if [ "$id" != "None" ]; then
    echo "Image '$name' already exists with Image ID: $id"
    return 0
  fi

  echo "Image '$name' does not exist. Creating from snapshot..."

  # UEFI boot mode and ENA support match how the raw image was built.
  id=$(AWS ec2 register-image \
    --name "$name" \
    --description "AMI created from snapshot $snapshot" \
    --architecture x86_64 \
    --root-device-name /dev/xvda \
    --block-device-mappings "[{\"DeviceName\":\"/dev/xvda\",\"Ebs\":{\"SnapshotId\":\"$snapshot\"}}]" \
    --virtualization-type hvm \
    --boot-mode uefi \
    --ena-support \
    --query 'ImageId' \
    --output text)

  echo "Image '$name' created with Image ID: $id"
}

# Grant launch permission to the "all" group (i.e. make the AMI public) for
# the image whose name is $1. Exits 1 when no such image exists.
makeAMIpublic() {
  local name="$1"
  local id

  id=$(AWS ec2 describe-images --filters "Name=name,Values=$name" --query 'Images[0].ImageId' --output text)

  # describe-images prints the literal string "None" when nothing matches.
  [ "$id" != "None" ] || {
    echo "Error: Image '$name' does not exist."
    exit 1
  }

  echo "Making image '$name' public..."
  AWS ec2 modify-image-attribute --image-id "$id" --launch-permission "{\"Add\":[{\"Group\":\"all\"}]}"
  echo "Image '$name' is now public."
}

# ----- Main script -----
# Fixed ordering: validate the arguments BEFORE deriving values from them
# (the original computed `basename "$1"` first, silently working on an
# empty argument when none was given). All expansions are now quoted.
checkEnvVars
checkArguments "$@"

baseName=$(basename "$1")

ensureVmImportRole
uploadImageToS3 "$1"
# importAsSnapshot prints progress; its LAST line is the snapshot ID.
# Fixed: `| tee /dev/tty` was dropped — /dev/tty is unavailable on CI
# runners; the captured output is echoed afterwards so it still lands
# in the job log.
output=$(importAsSnapshot "$baseName")
echo "$output"
snapshotID=$(echo "$output" | tail -1)
checkImageExistsOrCreate "$baseName" "$snapshotID"
makeAMIpublic "$baseName"
97 changes: 97 additions & 0 deletions .github/workflows/push-aws-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# CI pipeline (WIP): build a Kairos raw disk image with Auroraboot and
# publish it to AWS as a public AMI.
name: Push to AWS

on:
  pull_request:
    branches:
      - master

permissions: read-all

jobs:
  build-aws-image:
    runs-on: ubuntu-latest
    permissions:
      id-token: write # OIDC support
      contents: write
      actions: read
      security-events: write
    strategy:
      matrix:
        include:
          # We don't publish AWS images for all combinations so we go hardcoded here
          - flavor: ubuntu
            flavor_release: "24.04"
            family: ubuntu
            base_image: ubuntu:24.04
            variant: standard
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
      - run: |
          git fetch --prune --unshallow
      - name: Release space from worker
        run: |
          # Fixed path: the script lives under ".github/" (was "./github/...").
          ./.github/release-space-from-ubuntu-runners.sh
      - name: Install kairos-agent (for versioneer)
        uses: Luet-lab/luet-install-action@cec77490c3f2416d7d07a47cfab04d448641d7ce # v1.1
        with:
          repository: quay.io/kairos/packages
          packages: system/kairos-agent
      - name: Build 🔧
        run: |
          # REPLACE_IN_FINAL
          # TODO: Does "sort" work correctly on k3s versions? Which version do we want?
          #k3s_version=$(echo '${{ needs.get-standard-matrix.outputs.matrix }}' | \
          #  jq -r '[.[].k3s_version] | unique | sort | .[0]')
          k3s_version=v1.32.0-k3s1
          # REPLACE_IN_FINAL
          #version=$(git describe --always --tags --dirty)
          version=master
          # Fixed expressions: "${{ .matrix.flavor }}" is not valid GitHub
          # Actions syntax; the matrix context is referenced as "matrix.flavor".
          containerImage=$(kairos-agent versioneer container-artifact-name \
            --flavor ${{ matrix.flavor }} \
            --flavor-release ${{ matrix.flavor_release }} \
            --variant ${{ matrix.variant }} \
            --model generic \
            --arch amd64 \
            --software-version-prefix k3s \
            --registry-and-org quay.io/kairos \
            --software-version "$k3s_version" \
            --version "$version"
          )
          # Fixed: "-ti" was dropped — Actions runners have no TTY and
          # `docker run -t` fails there.
          docker run -v /var/run/docker.sock:/var/run/docker.sock --net host \
            --privileged \
            -v $PWD:/aurora --rm quay.io/kairos/auroraboot \
            --debug \
            --set "disable_http_server=true" \
            --set "container_image=docker:${containerImage}" \
            --set "disable_netboot=true" \
            --set "disk.raw=true" \
            --set "state_dir=/aurora"
      - name: Install AWS cli
        run: |
          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
          unzip awscliv2.zip
          sudo ./aws/install
      # https://github.com/aws-actions/configure-aws-credentials?tab=readme-ov-file#assumerole-with-static-iam-credentials-in-repository-secrets
      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: eu-central-1
          role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
          role-external-id: ${{ secrets.AWS_ROLE_EXTERNAL_ID }}
          role-duration-seconds: 1200
          role-session-name: AWSCIPush

      - name: Push to AWS
        env:
          AWS_S3_BUCKET: kairos-cloud-images
          # Fixed: the run script previously overrode this with an unexported
          # shell assignment "AWS_PROFILE=kairos", pointing at a profile that
          # is never configured in this job; configure-aws-credentials sets
          # up the default credentials.
          AWS_PROFILE: default
          AWS_REGION: eu-central-1
        run: |
          # Glob instead of parsing `ls`; the script validates the file.
          .github/upload-image-to-aws.sh ./*.raw
Loading

0 comments on commit e32fe55

Please sign in to comment.