#!/usr/bin/env nix-shell
#!nix-shell -p awscli -p jq -p qemu -i bash
# shellcheck shell=bash
#
# Future Deprecation?
# This entire thing should probably be replaced with a generic terraform config

# Uploads and registers NixOS images built from the
# <nixos/release.nix> amazonImage attribute. Images are uploaded and
# registered via a home region, and then copied to other regions.

# The home region requires an S3 bucket, and an IAM role named "vmimport"
# (by default) with access to the S3 bucket. The role name can be
# configured with the "service_role_name" variable. Configuration of the
# vmimport role is documented in
# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html
#
# The complete setup on the AWS side can be created with the Terraform
# configuration below. It generates a ./credentials.sh file that can be
# sourced before running this script. Note: the entire stack of users and
# the bucket can be destroyed once the import is finished.
#
#   variable "region" {
#     type = string
#   }
#
#   variable "availability_zone" {
#     type = string
#   }
#
#   provider "aws" {
#     region = var.region
#   }
#
#   resource "aws_s3_bucket" "nixos-amis" {
#     bucket_prefix = "nixos-amis-"
#     lifecycle_rule {
#       enabled                                = true
#       abort_incomplete_multipart_upload_days = 1
#       expiration {
#         days = 7
#       }
#     }
#   }
#
#   resource "local_file" "credential-file" {
#     file_permission   = "0700"
#     filename          = "${path.module}/credentials.sh"
#     sensitive_content = <<SCRIPT
#   export service_role_name="${aws_iam_role.vmimport.name}"
#   export bucket="${aws_s3_bucket.nixos-amis.bucket}"
#   export AWS_ACCESS_KEY_ID="${aws_iam_access_key.uploader.id}"
#   export AWS_SECRET_ACCESS_KEY="${aws_iam_access_key.uploader.secret}"
#   SCRIPT
#   }
#
#   # The following resources are for the *uploader*.
#   resource "aws_iam_user" "uploader" {
#     name = "nixos-amis-uploader"
#   }
#
#   resource "aws_iam_access_key" "uploader" {
#     user = aws_iam_user.uploader.name
#   }
#
#   resource "aws_iam_user_policy" "upload-to-nixos-amis" {
#     user   = aws_iam_user.uploader.name
#     policy = data.aws_iam_policy_document.upload-policy-document.json
#   }
#
#   data "aws_iam_policy_document" "upload-policy-document" {
#     statement {
#       effect = "Allow"
#       actions = [
#         "s3:ListBucket",
#         "s3:GetBucketLocation",
#       ]
#       resources = [
#         aws_s3_bucket.nixos-amis.arn
#       ]
#     }
#
#     statement {
#       effect = "Allow"
#       actions = [
#         "s3:PutObject",
#         "s3:GetObject",
#         "s3:DeleteObject",
#       ]
#       resources = [
#         "${aws_s3_bucket.nixos-amis.arn}/*"
#       ]
#     }
#
#     statement {
#       effect = "Allow"
#       actions = [
#         "ec2:ImportSnapshot",
#         "ec2:DescribeImportSnapshotTasks",
#         "ec2:RegisterImage",
#         "ec2:DescribeImages"
#       ]
#       resources = [
#         "*"
#       ]
#     }
#   }
#
#   # The following resources are for the *vmimport* service role.
#   # See: https://docs.aws.amazon.com/vm-import/latest/userguide/vmie_prereqs.html#vmimport-role
#   resource "aws_iam_role" "vmimport" {
#     assume_role_policy = data.aws_iam_policy_document.vmimport-trust.json
#   }
#
#   resource "aws_iam_role_policy" "vmimport-access" {
#     role   = aws_iam_role.vmimport.id
#     policy = data.aws_iam_policy_document.vmimport-access.json
#   }
#
#   data "aws_iam_policy_document" "vmimport-access" {
#     statement {
#       effect = "Allow"
#       actions = [
#         "s3:GetBucketLocation",
#         "s3:GetObject",
#         "s3:ListBucket",
#       ]
#       resources = [
#         aws_s3_bucket.nixos-amis.arn,
#         "${aws_s3_bucket.nixos-amis.arn}/*"
#       ]
#     }
#
#     statement {
#       effect = "Allow"
#       actions = [
#         "ec2:ModifySnapshotAttribute",
#         "ec2:CopySnapshot",
#         "ec2:RegisterImage",
#         "ec2:Describe*"
#       ]
#       resources = [
#         "*"
#       ]
#     }
#   }
#
#   data "aws_iam_policy_document" "vmimport-trust" {
#     statement {
#       effect = "Allow"
#
#       principals {
#         type        = "Service"
#         identifiers = [ "vmie.amazonaws.com" ]
#       }
#
#       actions = [
#         "sts:AssumeRole"
#       ]
#
#       condition {
#         test     = "StringEquals"
#         variable = "sts:ExternalId"
#         values   = [ "vmimport" ]
#       }
#     }
#   }
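
# Typical invocation (paths illustrative): source the generated
# credentials.sh, then run this script on the build result of the
# amazonImage attribute, e.g.
#
#   source ./credentials.sh
#   ./create-amis.sh ./result
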
# set -x
set -euo pipefail

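# `var` is a no-op; calling `var ${foo:=default}` assigns "default" to foo
# only when it is unset or empty, so every setting below can be overridden
# from the environment (e.g. via the generated credentials.sh).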
var () { true; }

# configuration
var ${state_dir:=$HOME/amis/ec2-images}
var ${home_region:=eu-west-1}
var ${bucket:=nixos-amis}
var ${service_role_name:=vmimport}

var ${regions:=eu-west-1 eu-west-2 eu-west-3 eu-central-1 eu-north-1
               us-east-1 us-east-2 us-west-1 us-west-2
               ca-central-1
               ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2
               ap-south-1 ap-east-1
               sa-east-1}

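# Split the space-separated region list into a bash array.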
regions=($regions)

log() {
    echo "$@" >&2
}

if [ "$#" -ne 1 ]; then
    log "Usage: ./create-amis.sh IMAGE_OUTPUT"
    exit 1
fi

# result of the amazon-image from nixos/release.nix
store_path=$1

if [ ! -e "$store_path" ]; then
    log "Store path: $store_path does not exist, fetching..."
    nix-store --realise "$store_path"
fi

if [ ! -d "$store_path" ]; then
    log "store_path: $store_path is not a directory. aborting"
    exit 1
fi

read_image_info() {
    if [ ! -e "$store_path/nix-support/image-info.json" ]; then
        log "Image missing metadata"
        exit 1
    fi
    jq -r "$1" "$store_path/nix-support/image-info.json"
}
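
# Sketch of the image-info.json fields this script reads (values are
# illustrative; the "boot" disk is only present for ZFS images):
#
#   {
#     "label": "24.05pre-git",
#     "system": "x86_64-linux",
#     "boot_mode": "uefi",
#     "disks": {
#       "root": { "file": "/nix/store/...-amazon-image/nixos.vhd", "logical_bytes": 2147483648 },
#       "boot": { "file": "...", "logical_bytes": "..." }
#     }
#   }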

# We handle a single image per invocation, store all attributes in
# globals for convenience.
zfs_disks=$(read_image_info .disks)
is_zfs_image=
if jq -e .boot <<< "$zfs_disks"; then
    is_zfs_image=1
    zfs_boot=".disks.boot"
fi
image_label="$(read_image_info .label)${is_zfs_image:+-ZFS}"
image_system=$(read_image_info .system)
image_files=( $(read_image_info ".disks.root.file") )

image_logical_bytes=$(read_image_info "${zfs_boot:-.disks.root}.logical_bytes")

if [[ -n "$is_zfs_image" ]]; then
    image_files+=( $(read_image_info .disks.boot.file) )
fi

# Derived attributes

image_logical_gigabytes=$(((image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
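# e.g. an image of 2147483649 bytes (2 GiB + 1 byte) becomes 3 gigabytes here.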

case "$image_system" in
    aarch64-linux)
        amazon_arch=arm64
        ;;
    x86_64-linux)
        amazon_arch=x86_64
        ;;
    *)
        log "Unknown system: $image_system"
        exit 1
        ;;
esac

image_name="NixOS-${image_label}-${image_system}"
image_description="NixOS ${image_label} ${image_system}"

log "Image Details:"
log " Name: $image_name"
log " Description: $image_description"
log " Size (gigabytes): $image_logical_gigabytes"
log " System: $image_system"
log " Amazon Arch: $amazon_arch"
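
# read_state/write_state persist intermediate results (import task ids,
# snapshot ids, AMI ids) as one file per key under $state_dir, so the
# script can be re-run and will resume where it left off.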
read_state() {
    local state_key=$1
    local type=$2

    cat "$state_dir/$state_key.$type" 2>/dev/null || true
}

write_state() {
    local state_key=$1
    local type=$2
    local val=$3

    mkdir -p "$state_dir"
    echo "$val" > "$state_dir/$state_key.$type"
}

wait_for_import() {
    local region=$1
    local task_id=$2
    local state message snapshot_id
    log "Waiting for import task $task_id to be completed"
    while true; do
        read -r state message snapshot_id < <(
            aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" | \
                jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.StatusMessage) \(.SnapshotId)"'
        )
        log " ... state=$state message=$message snapshot_id=$snapshot_id"
        case "$state" in
            active)
                sleep 10
                ;;
            completed)
                echo "$snapshot_id"
                return
                ;;
            *)
                log "Unexpected snapshot import state: '${state}'"
                log "Full response: "
                aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" >&2
                exit 1
                ;;
        esac
    done
}

wait_for_image() {
    local region=$1
    local ami_id=$2
    local state
    log "Waiting for image $ami_id to be available"

    while true; do
        read -r state < <(
            aws ec2 describe-images --image-ids "$ami_id" --region "$region" | \
                jq -r ".Images[].State"
        )
        log " ... state=$state"
        case "$state" in
            pending)
                sleep 10
                ;;
            available)
                return
                ;;
            *)
                log "Unexpected AMI state: '${state}'"
                exit 1
                ;;
        esac
    done
}

make_image_public() {
    local region=$1
    local ami_id=$2

    wait_for_image "$region" "$ami_id"

    log "Making image $ami_id public"

    aws ec2 modify-image-attribute \
        --image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2
}

upload_image() {
    local region=$1

    for image_file in "${image_files[@]}"; do
        local aws_path=${image_file#/}

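        # For ZFS images there are two disk files; derive a per-disk suffix
        # ("root" or "boot") from the file name so that each disk gets its
        # own state entries below (e.g. an illustrative nixos.root.vhd
        # yields "root").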
        if [[ -n "$is_zfs_image" ]]; then
            local suffix=${image_file%.*}
            suffix=${suffix##*.}
        fi

        local state_key="$region.$image_label${suffix:+.${suffix}}.$image_system"
        local task_id
        task_id=$(read_state "$state_key" task_id)
        local snapshot_id
        snapshot_id=$(read_state "$state_key" snapshot_id)
        local ami_id
        ami_id=$(read_state "$state_key" ami_id)

        if [ -z "$task_id" ]; then
            log "Checking for image on S3"
            if ! aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then
                log "Image missing from aws, uploading"
                aws s3 cp --region "$region" "$image_file" "s3://${bucket}/${aws_path}" >&2
            fi

            log "Importing image from S3 path s3://$bucket/$aws_path"

            task_id=$(aws ec2 import-snapshot --role-name "$service_role_name" --disk-container "{
              \"Description\": \"nixos-image-${image_label}-${image_system}\",
              \"Format\": \"vhd\",
              \"UserBucket\": {
                \"S3Bucket\": \"$bucket\",
                \"S3Key\": \"$aws_path\"
              }
            }" --region "$region" | jq -r '.ImportTaskId')

            write_state "$state_key" task_id "$task_id"
        fi

        if [ -z "$snapshot_id" ]; then
            snapshot_id=$(wait_for_import "$region" "$task_id")
            write_state "$state_key" snapshot_id "$snapshot_id"
        fi
    done

    if [ -z "$ami_id" ]; then
        log "Registering snapshot $snapshot_id as AMI"

        local block_device_mappings=(
            "DeviceName=/dev/xvda,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
        )

        if [[ -n "$is_zfs_image" ]]; then
            local root_snapshot_id=$(read_state "$region.$image_label.root.$image_system" snapshot_id)

            local root_image_logical_bytes=$(read_image_info ".disks.root.logical_bytes")
            local root_image_logical_gigabytes=$(((root_image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB

            block_device_mappings+=(
                "DeviceName=/dev/xvdb,Ebs={SnapshotId=$root_snapshot_id,VolumeSize=$root_image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
            )
        fi

        local extra_flags=(
            # Register the root device as /dev/xvda.
            #
            # For the case of blkfront drives, there appears to be no difference
            # between /dev/sda1 and /dev/xvda: the drive always appears as the
            # kernel device /dev/xvda.
            #
            # For the case of nvme drives, the root device typically appears as
            # /dev/nvme0n1. Amazon provides the 'ec2-utils' package for their
            # first-party Linux ("Amazon Linux"), which configures udev to create
            # symlinks from the provided name to the nvme device name. This name
            # is communicated through the nvme "Identify Controller" response,
            # which can be inspected with:
            #
            #   nvme id-ctrl --raw-binary /dev/nvme0n1 | cut -c3073-3104 | hexdump -C
            #
            # On Amazon Linux, where the device is attached as "/dev/xvda", this
            # creates:
            #   - /dev/xvda  -> nvme0n1
            #   - /dev/xvda1 -> nvme0n1p1
            #
            # On NixOS, where the device is attached as "/dev/sda1", this creates:
            #   - /dev/sda1  -> nvme0n1
            #   - /dev/sda11 -> nvme0n1p1
            #
            # This is odd, but not inherently a problem. NixOS unconditionally
            # configures grub to install to /dev/xvda, which fails on an instance
            # using nvme storage. With the root device name set to xvda, both
            # blkfront and nvme drives are accessible as /dev/xvda, either
            # directly or by symlink.
            --root-device-name /dev/xvda
            --sriov-net-support simple
            --ena-support
            --virtualization-type hvm
        )

        block_device_mappings+=("DeviceName=/dev/sdb,VirtualName=ephemeral0")
        block_device_mappings+=("DeviceName=/dev/sdc,VirtualName=ephemeral1")
        block_device_mappings+=("DeviceName=/dev/sdd,VirtualName=ephemeral2")
        block_device_mappings+=("DeviceName=/dev/sde,VirtualName=ephemeral3")

        ami_id=$(
            aws ec2 register-image \
                --name "$image_name" \
                --description "$image_description" \
                --region "$region" \
                --architecture $amazon_arch \
                --block-device-mappings "${block_device_mappings[@]}" \
                --boot-mode $(read_image_info .boot_mode) \
                "${extra_flags[@]}" \
                | jq -r '.ImageId'
        )

        write_state "$state_key" ami_id "$ami_id"
    fi

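    # Setting the PRIVATE environment variable (to any value) skips making
    # the resulting AMI public.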
    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"

    echo "$ami_id"
}

copy_to_region() {
    local region=$1
    local from_region=$2
    local from_ami_id=$3

    state_key="$region.$image_label.$image_system"
    ami_id=$(read_state "$state_key" ami_id)

    if [ -z "$ami_id" ]; then
        log "Copying $from_ami_id to $region"
        ami_id=$(
            aws ec2 copy-image \
                --region "$region" \
                --source-region "$from_region" \
                --source-image-id "$from_ami_id" \
                --name "$image_name" \
                --description "$image_description" \
                | jq -r '.ImageId'
        )

        write_state "$state_key" ami_id "$ami_id"
    fi

    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"

    echo "$ami_id"
}

upload_all() {
    home_image_id=$(upload_image "$home_region")
    jq -n \
        --arg key "$home_region.$image_system" \
        --arg value "$home_image_id" \
        '$ARGS.named'

    for region in "${regions[@]}"; do
        if [ "$region" = "$home_region" ]; then
            continue
        fi
        copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id")

        jq -n \
            --arg key "$region.$image_system" \
            --arg value "$copied_image_id" \
            '$ARGS.named'
    done
}
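
# upload_all prints one {"key": "<region>.<system>", "value": "<ami id>"} object
# per region; jq --slurp from_entries merges them into a single JSON map, e.g.
# {"eu-west-1.x86_64-linux": "ami-0123456789abcdef0"} (illustrative id).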
upload_all | jq --slurp from_entries