From da238a7276d067597f374d2e58ba3866316fb099 Mon Sep 17 00:00:00 2001 From: Adam Williamson Date: Jun 04 2024 16:19:40 +0000 Subject: Remove most claims that this thing is cloud-specific This doesn't rename the project itself - it's still cloud-image-uploader in README.md for now - but it does at least rename the Python library (to just "fedora_image_uploader") and adjust a lot of places where, in some way, we claim or imply that this thing only handles cloud images. Signed-off-by: Adam Williamson --- diff --git a/Containerfile b/Containerfile index c5b8fa8..aa6ea34 100644 --- a/Containerfile +++ b/Containerfile @@ -9,9 +9,9 @@ RUN dnf install -y \ skopeo \ buildah -RUN mkdir -p /srv/cloud-uploader/ -COPY . /srv/cloud-uploader/src -WORKDIR /srv/cloud-uploader +RUN mkdir -p /srv/image-uploader/ +COPY . /srv/image-uploader/src +WORKDIR /srv/image-uploader RUN python3 -m venv venv && \ venv/bin/pip install --no-cache-dir src/ @@ -20,12 +20,12 @@ RUN python3 -m venv venv && \ # https://github.com/ansible-collections/azure/pull/1466 RUN venv/bin/ansible-galaxy collection install --force azure.azcollection && \ pushd ~/.ansible/collections/ansible_collections/azure/azcollection/ && \ - patch -p1 < /srv/cloud-uploader/src/patches/azure-pr-1466.patch && \ + patch -p1 < /srv/image-uploader/src/patches/azure-pr-1466.patch && \ popd # ansible-runner shells out so the PATH needs to include the virtualenv -ENV PATH="/srv/cloud-uploader/venv/bin:$PATH" +ENV PATH="/srv/image-uploader/venv/bin:$PATH" # Add the virtual environment to the path -ENTRYPOINT ["/srv/cloud-uploader/venv/bin/fedora-messaging"] -CMD ["consume", "--callback=fedora_cloud_image_uploader:Uploader"] +ENTRYPOINT ["/srv/image-uploader/venv/bin/fedora-messaging"] +CMD ["consume", "--callback=fedora_image_uploader:Uploader"] diff --git a/fedora-messaging.toml.example b/fedora-messaging.toml.example index a874e59..ac6a2bd 100644 --- a/fedora-messaging.toml.example +++ b/fedora-messaging.toml.example @@ -1,6 +1,6 @@ 
# A sample configuration for fedora-messaging. This file is in the TOML format. amqp_url = "amqp://" -callback = "fedora_cloud_image_uploader:Uploader" +callback = "fedora_image_uploader:Uploader" passive_declares = false publish_exchange = "amq.topic" topic_prefix = "" @@ -11,7 +11,7 @@ keyfile = "/etc/fedora-messaging/fedora-key.pem" certfile = "/etc/fedora-messaging/fedora-cert.pem" [client_properties] -app = "Fedora Cloud Image Uploader" +app = "Fedora Image Uploader" app_url = "https://pagure.io/cloud-image-uploader" app_contacts_email = "cloud@lists.fedoraproject.org" @@ -72,7 +72,7 @@ class = "logging.StreamHandler" formatter = "simple" stream = "ext://sys.stdout" -[log_config.loggers.fedora_cloud_image_uploader] +[log_config.loggers.fedora_image_uploader] level = "INFO" propagate = false handlers = ["console"] diff --git a/fedora_cloud_image_uploader/__init__.py b/fedora_cloud_image_uploader/__init__.py deleted file mode 100644 index b125f67..0000000 --- a/fedora_cloud_image_uploader/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import os - -PLAYBOOKS = os.path.abspath(os.path.join(os.path.dirname(__file__), "playbooks/")) -__version__ = "0.3.0" - -from .handler import Uploader # noqa: F401, E402 diff --git a/fedora_cloud_image_uploader/cli.py b/fedora_cloud_image_uploader/cli.py deleted file mode 100644 index 587e48b..0000000 --- a/fedora_cloud_image_uploader/cli.py +++ /dev/null @@ -1,16 +0,0 @@ -import json - -import click -from fedora_messaging import config, message - -from . 
import handler - - -@click.command() -@click.argument("message_file", type=click.File("r")) -def main(message_file): - """Process AMQP messages from a file, rather than connecting to the broker.""" - config.conf.setup_logging() - msg = message.load_message(json.load(message_file)) - consumer = handler.Uploader() - consumer(msg) diff --git a/fedora_cloud_image_uploader/handler.py b/fedora_cloud_image_uploader/handler.py deleted file mode 100644 index 425e82f..0000000 --- a/fedora_cloud_image_uploader/handler.py +++ /dev/null @@ -1,396 +0,0 @@ -import datetime -import hashlib -import logging -import lzma -import os -import subprocess -import tempfile -import time -from collections.abc import Iterable - -import ansible_runner -from azure import identity as az_identity -from azure.mgmt.compute import ComputeManagementClient -from fedfind import exceptions as ff_exceptions -from fedfind import helpers as ff_helpers -from fedfind import release as ff_release -from fedora_messaging import config -from fedora_messaging import exceptions as fm_exceptions -from fedora_messaging import message as fm_message -from requests import Session, adapters -from requests.exceptions import RequestException -from urllib3.util import Retry - -from . 
import PLAYBOOKS - -_log = logging.getLogger(__name__) - - -def _run(args: Iterable[str]): - """Run a command and handle errors.""" - _log.debug("image_uploader running command %s", " ".join(args)) - try: - ret = subprocess.run(args, encoding="utf-8", capture_output=True, timeout=7200) - except subprocess.TimeoutExpired: - _log.error("Command: %s timed out after two hours", " ".join(args)) - raise fm_exceptions.Nack() - except OSError as err: - _log.error("Command: %s caused error %s", " ".join(args), err) - raise fm_exceptions.Nack() - if ret.returncode: - _log.error("Command: %s returned %d", " ".join(args), ret.returncode) - _log.error("stdout: %s", ret.stdout) - _log.error("stderr: %s", ret.stderr) - raise fm_exceptions.Nack() - - -class Uploader: - - def __init__(self): - self.conf = config.conf["consumer_config"] - self.requests = Session() - retry_config = Retry(total=5, backoff_factor=1) - self.requests.mount("https://", adapters.HTTPAdapter(max_retries=retry_config)) - self.handlers = (self.handle_azure, self.handle_container) - # tracks the container repos we got images for, for manifest - # creation purposes - self.container_repos = dict() - - def __call__(self, message: fm_message.Message): - """ - Consumes Pungi messages and uploads cloud images from finished composes. - """ - # We only care about finished composes, but we can't filter that out at the - # AMQP topic level. 
- if message.body.get("status") not in ("FINISHED", "FINISHED_INCOMPLETE"): - return - - try: - compose_id = message.body["compose_id"] - except KeyError: - _log.error("Message body is missing 'compose_id' key!") - return - - try: - ffrel = ff_release.get_release(cid=compose_id) - except ff_exceptions.UnsupportedComposeError: - _log.info("Skipping compose %s as it contains no images", compose_id) - return - # reset for each message - self.container_repos = dict() - try: - for image in ffrel.all_images: - for handler in self.handlers: - handler(image, ffrel) - except fm_exceptions.Nack: - # If we failed to process an image, it's not likely the failure will resolve - # itself in the time it takes to re-queue the message and then consume it again. - # This stops us from hammering the broker and spinning as fast as we can. - time.sleep(60) - raise - - if self.container_repos: - # manifest stuff - for repo in self.container_repos: - for registry in self.conf["container"]["registries"]: - # something like "registry.fedoraproject.org/fedora:40" - regname = f"{registry}/{repo}:{str(ffrel.relnum)}" - targets = [regname] - # we also create aliased manifests for rawhide and - # latest stable - if ffrel.release.lower() == "rawhide": - targets.append(f"{registry}/{repo}:rawhide") - elif ffrel.relnum == ff_helpers.get_current_release(branched=False): - targets.append(f"{registry}/{repo}:latest") - for target in targets: - # wipe the manifest if it exists already - _run(("buildah", "rmi", target)) - # create the manifest with all arches - createargs = ["buildah", "manifest", "create", target] - createargs.extend( - # it's intentional that this is regname not target - f"{regname}-{arch}" - for arch in self.container_repos[repo] - ) - _run(createargs) - # push it - pushargs = ( - "buildah", - "manifest", - "push", - target, - f"docker://{target}", - "--all", - ) - _run(pushargs) - - def download_image(self, image: dict, dest_dir: str, decompress=False) -> str: - """ - Download, 
verify, and optionally decompress the image. - - Args: - image (dict): An image from a `fedfind.release.Release`. - dest_dir (os.PathLike): Where to write the image. - decompress (bool): Whether or not to LZMA decompress the image as it's - downloaded. - """ - image_file_name = os.path.basename(image["path"]) - image_dest = os.path.join(dest_dir, image_file_name) - # Consider using getattr or something to make this work for new checksum - # algorithms. - checksum = hashlib.sha256(usedforsecurity=False) - expected_checksum = image["checksums"]["sha256"] - image_url = image["url"] - decompressor = None - if decompress: - image_dest = os.path.join(dest_dir, image_file_name.removesuffix(".xz")) - decompressor = lzma.LZMADecompressor() - - try: - with self.requests.get(image_url, stream=True, timeout=30) as req: - _log.info(f"Writing {image_url} to {image_dest}") - req.raise_for_status() - with open(image_dest, "wb") as fd: - for chunk in req.iter_content(chunk_size=16 * 1024): - checksum.update(chunk) - if decompressor and chunk: - chunk = decompressor.decompress(chunk) - fd.write(chunk) - except RequestException as e: - _log.error("Failed to download image from %s: %s", image_url, e) - raise fm_exceptions.Nack() - - actual_checksum = checksum.hexdigest().lower() - _log.info("Image downloaded (sha256:%s)", actual_checksum) - if decompressor and (not decompressor.eof or decompressor.unused_data): - _log.error( - "LZMA unexpected state: EOF %s, unused bytes: %d", - decompressor.eof, - len(decompressor.unused_data), - ) - raise fm_exceptions.Nack() - - if actual_checksum != expected_checksum.lower(): - _log.error( - "Image checksum (sha256:%s) does not match expected checksum (sha256:%s)", - actual_checksum, - expected_checksum, - ) - raise fm_exceptions.Nack() - - return image_dest - - def run_playbook(self, playbook: str, variables: dict, workdir: str): - """ - Execute Ansible playbook in workdir using variables. 
- - Args: - playbook (str): The path of the playbook to execute. - variables (dict): Variables to be used. - workdir (str): The path to the working directory to use. - """ - _log.info("Executing playbook %s", playbook) - result = ansible_runner.interface.run( - playbook=playbook, - timeout=30 * 60, - private_data_dir=workdir, - envvars={ - "ANSIBLE_HOME": workdir, - "ANSIBLE_COLLECTIONS_PATH": "/root/.ansible/collections", - }, - extravars=variables, - ) - if result.rc != 0: - _log.error(f"Playbook failed with return code {result.rc}") - raise fm_exceptions.Nack() - - def handle_azure(self, image: dict, ffrel: ff_release.Release): - """ - Handle Azure images. - """ - if image.get("subvariant") != "Cloud_Base" or image.get("type") != "vhd-compressed": - return - if image["arch"] not in ("x86_64", "aarch64"): - # unsupported arch - return - if ffrel.relnum < 40: - # images prior to F40 aren't supported - return - - with tempfile.TemporaryDirectory() as workdir: - image_path = self.download_image(image, workdir, decompress=True) - # Generate variables - if hasattr(ffrel, "label") and ffrel.label: - # These are in the format {milestone}-X.Z - (y_release, z_release) = ffrel.label.split("-")[1].split(".") - image_suffix = ( - ffrel.release - if ffrel.label.lower().startswith("rc") - else f"{ffrel.release}-Prerelease" - ) - else: - y_release = ffrel.metadata["composeinfo"]["payload"]["compose"]["date"] - z_release = ffrel.metadata["composeinfo"]["payload"]["compose"]["respin"] - image_suffix = ( - ffrel.release - if ffrel.release.lower() == "rawhide" - else f"{ffrel.release}-Prerelease" - ) - gallery_image_name = f"Fedora-Cloud-{image_suffix}" - image_version = f"{ffrel.relnum}.{y_release}.{z_release}" - eol = ffrel.eol - if not eol and ffrel.release.lower() != "rawhide": - # It's probably a pre-release or GA image. We can reasonably guess - # EOL will be at _least_ a year. Better to under-promise and over-deliver. 
- eol = datetime.datetime.today() + datetime.timedelta(days=365) - eol = eol.strftime("%Y-%m-%d") - - variables = { - "location": self.conf["azure"]["location"], - "resource_group_name": self.conf["azure"]["resource_group_name"], - "gallery_name": self.conf["azure"]["gallery_name"], - "gallery_description": self.conf["azure"]["gallery_description"], - "storage_account_name": self.conf["azure"]["storage_account_name"], - "storage_container_name": self.conf["azure"]["storage_container_name"], - "target_regions": self.conf["azure"]["target_regions"], - "architecture": {"x86_64": "x64", "aarch64": "Arm64"}[image["arch"]], - "image_source": image_path, - "gallery_image_name": gallery_image_name, - "gallery_image_version": image_version, - "end_of_life_date": eol, - "exclude_from_latest": True, - "ansible_remote_tmp": workdir, - } - playbook = os.path.join(PLAYBOOKS, "azure.yml") - self.run_playbook(playbook, variables, workdir) - try: - self.azure_cleanup_images() - except Exception: - _log.exception("Unable to clean up Azure images") - - def azure_cleanup_images(self): - """ - Remove old images from the Azure Compute Gallery. 
- """ - subscription = os.environ["AZURE_SUBSCRIPTION_ID"] - creds = az_identity.ClientSecretCredential( - tenant_id=os.environ["AZURE_TENANT"], - client_id=os.environ["AZURE_CLIENT_ID"], - client_secret=os.environ["AZURE_SECRET"], - ) - compute_client = ComputeManagementClient( - credential=creds, - subscription_id=subscription, - api_version="2023-07-03", - ) - - resource_group = self.conf["azure"]["resource_group_name"] - gallery_name = self.conf["azure"]["gallery_name"] - _log.info("Querying image definitions in gallery %s", gallery_name) - for image_definition in compute_client.gallery_images.list_by_gallery( - resource_group_name=resource_group, gallery_name=gallery_name - ): - end_of_life_images = [] - excluded_images = [] - _log.info("Querying image versions in definition %s", image_definition.name) - image_versions = list( - compute_client.gallery_image_versions.list_by_gallery_image( - resource_group_name=resource_group, - gallery_name=gallery_name, - gallery_image_name=image_definition.name, - ) - ) - for image_version in image_versions: - if ( - image_version.publishing_profile.exclude_from_latest - or "rawhide" in image_definition.name.lower() - ): - excluded_images.append(image_version) - if ( - image_version.publishing_profile.end_of_life_date - and image_version.publishing_profile.end_of_life_date - < datetime.datetime.now(datetime.UTC) - ): - end_of_life_images.append(image_version) - excluded_images.sort( - key=lambda version: version.publishing_profile.published_date, reverse=True - ) - - _log.info( - "Removing %d out of %d images from %s", - max(len(excluded_images) - 7, 0), - len(excluded_images), - image_definition.name, - ) - # Save the latest week of images that have been excluded from latest - for image in excluded_images[7:]: - compute_client.gallery_image_versions.begin_delete( - resource_group, gallery_name, image_definition.name, image.name - ) - _log.info( - f"Deleted image {image.name} (excluded from latest) from " - 
f"{image_definition.name} since 7 newer versions exist" - ) - for image in end_of_life_images: - compute_client.gallery_image_versions.begin_delete( - resource_group, gallery_name, image_definition.name, image.name - ) - _log.info( - f"Deleted image {image.name} from {image_definition.name} " - "since the image is end-of-life" - ) - - if len(image_versions) == 0: - compute_client.gallery_images.begin_delete( - resource_group, gallery_name, image_definition.name - ) - _log.info( - f"Deleted image definition {image_definition.name} since it has no versions" - ) - - def handle_container(self, image: dict, ffrel: ff_release.Release): - """Handle container images.""" - registries = self.conf.get("container", {}).get("registries") - if not registries: - # we can't do anything if no registries are configured - return - if image["type"] not in ("docker", "ociarchive"): - # not a known container image type - return - repos = { - "Container_Toolbox": "fedora-toolbox", - "Container_Minimal_Base": "fedora-minimal", - "Container_Base": "fedora", - "Silverblue": "fedora-silverblue", - "Kinoite": "fedora-kinoite", - "Onyx": "fedora-onyx", - "Sericea": "fedora-sericea", - } - repo = repos.get(image["subvariant"]) - if not repo: - _log.debug("Unknown subvariant %s", image["subvariant"]) - return - if image["type"] == "docker" and ffrel.relnum < 40: - # these are actual docker archive images - imgformat = "docker-archive" - else: - # all others are OCI archives; F40+ .oci.tar.xz images - # with type "docker" are xz-compressed OCI archives, - # .ociarchive images with type "ociarchive" are non- - # compressed OCI archives - imgformat = "oci-archive" - arch = image["arch"] - with tempfile.TemporaryDirectory() as workdir: - image_path = self.download_image(image, workdir, decompress=True) - for registry in registries: - args = [ - "skopeo", - "copy", - f"{imgformat}:{image_path}", - f"docker://{registry}/{repo}:{str(ffrel.relnum)}-{arch}", - ] - _run(args) - if repo in 
self.container_repos: - self.container_repos[repo].append(arch) - else: - self.container_repos[repo] = [arch] diff --git a/fedora_cloud_image_uploader/playbooks/azure.yml b/fedora_cloud_image_uploader/playbooks/azure.yml deleted file mode 100644 index c6215f1..0000000 --- a/fedora_cloud_image_uploader/playbooks/azure.yml +++ /dev/null @@ -1,165 +0,0 @@ -# The user can either have already authenticated with the CLI (e.g. az login), -# or provide credentials via environment variables. The following environment -# variables are used by the azure collection: -# AZURE_PROFILE -# AZURE_SUBSCRIPTION_ID -# AZURE_CLIENT_ID -# AZURE_SECRET -# AZURE_TENANT -# AZURE_AD_USER -# AZURE_PASSWORD -# AZURE_CLOUD_ENVIRONMENT -# AZURE_CERT_VALIDATION_MODE -# AZURE_ADFS_AUTHORITY_URL -# AZURE_X509_CERTIFICATE_PATH -# AZURE_THUMBPRINT -# -# For example, if you added an app registration, created a client secret for it, and added it to a subscriptions -# access control with enough priveleges ("Contributor" role is over-broad, but enough for testing) to your -# subscription, you could provide the AZURE_SECRET, AZURE_TENANT, AZURE_CLIENT_ID, and AZURE_SUBSCRIPTION_ID -# variables to authenticate. -# -# The caller must define the following variables: -# architecture: The architecture of the image being uploaded; one of "x64" and "Arm64". -# image_source: The path on the local filesystem where the image is stored. -# gallery_image_name: Name of an image definition, which contains image versions. -# gallery_image_version: Image version; must be in Major.Minor.Patch format, each within a 32-bit integer range. -# end_of_life_date: ISO-8601 format date indicating when the operating system reaches end-of-life. Can be null. -# exclude_from_latest: boolean to indicate whether this should be marked as the latest image. -# If true, VMs deployed from the image definition rather than a specific -# version will use this version. 
---- -- name: Create Fedora Azure marketplace image - hosts: localhost - vars: - # The Azure Compute Gallery name. - # - # Must be letters, numbers, underscores, and periods. Cannot begin or end with underscores or periods. - gallery_name: Fedora - gallery_description: | - The Fedora compute gallery. - gallery_image_name: "40" - release_note_uri: "https://docs.fedoraproject.org/en-US/fedora/f40/release-notes/" - privacy_statement_uri: "https://docs.fedoraproject.org/en-US/legal/privacy/" - - # List of dictionaries describing the replication rules for each Azure region. - target_regions: - - name: eastus2 - regional_replica_count: 2 - # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview#types-of-storage-accounts - storage_account_type: Standard_LRS - - # The Azure region all resources are deployed in. - location: eastus2 - resource_group_name: fedora-ansible-test - # Must be between 3 and 24 characters, numbers and lowercase letters only, globally unique - # - # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview#storage-account-name - storage_account_name: fedoraansibletest - # Container for a collection of blobs. - # - # Must be lowercase letters, numbers, and hyphens. Must begin with a letter or number. - # Must be between 3 and 63 characters long. 
- storage_container_name: fedora-vhds - - tasks: - - - name: Create a resource group for everything - azure.azcollection.azure_rm_resourcegroup: - name: "{{ resource_group_name }}" - location: "{{ location }}" - state: present - - - name: Create a storage account for image uploads - azure.azcollection.azure_rm_storageaccount: - resource_group: "{{ resource_group_name }}" - name: "{{ storage_account_name }}" - type: Standard_ZRS - kind: "StorageV2" - - - name: Create a container in the storage account for images - azure.azcollection.azure_rm_storageblob: - resource_group: "{{ resource_group_name }}" - storage_account_name: "{{ storage_account_name }}" - container: "{{ storage_container_name }}" - state: present - - - name: Checksum local file - ansible.builtin.stat: - path: "{{ image_source }}" - checksum_algorithm: sha256 - get_checksum: true - register: local_image_file - - - name: Set storage_blob_name to .vhd - ansible.builtin.set_fact: - storage_blob_name: "{{ local_image_file.stat.checksum }}.vhd" - - - name: Upload the image to blob storage - azure.azcollection.azure_rm_storageblob: - resource_group: "{{ resource_group_name }}" - storage_account_name: "{{ storage_account_name }}" - container: "{{ storage_container_name }}" - blob_name: "{{ storage_blob_name }}" - blob_type: page - source: "{{ image_source }}" - - # The ansible module doesn't include settings for sharing the gallery publicly - # so for the real thing that probably should be done manually (once). This is - # fine for local testing. 
- - name: Ensure the gallery exists - azure.azcollection.azure_rm_gallery: - resource_group: "{{ resource_group_name }}" - name: "{{ gallery_name }}" - location: "{{ location }}" - description: "{{ gallery_description }}" - - - name: Create or update gallery image - azure.azcollection.azure_rm_galleryimage: - resource_group: "{{ resource_group_name }}" - gallery_name: "{{ gallery_name }}" - name: "{{ gallery_image_name }}-{{ architecture }}" - release_note_uri: "{{ release_note_uri }}" - privacy_statement_uri: "{{ privacy_statement_uri }}" - location: "{{ location }}" - hypervgeneration: "V2" - architecture: "{{ architecture }}" - os_type: linux - os_state: generalized - identifier: - publisher: Fedora - offer: Cloud - sku: "{{ gallery_image_name }}-{{ architecture }}" - recommended: - memory: - min: 1 - v_cpus: - min: 1 - # See CONFIG_NR_CPUS - max: 8192 - features: - - name: SecurityType - value: TrustedLaunchSupported - - name: IsAcceleratedNetworkSupported - value: true - - name: DiskControllerTypes - value: NVMe,SCSI - - - name: Create a gallery image version from the VHD in blob storage - azure.azcollection.azure_rm_galleryimageversion: - resource_group: "{{ resource_group_name }}" - gallery_name: "{{ gallery_name }}" - gallery_image_name: "{{ gallery_image_name }}-{{ architecture }}" - name: "{{ gallery_image_version }}" - location: "{{ location }}" - publishing_profile: - end_of_life_date: "{{ end_of_life_date }}" - exclude_from_latest: "{{ exclude_from_latest }}" - target_regions: "{{ target_regions }}" - storage_profile: - os_disk: - host_caching: "ReadOnly" - source: - resource_group: "{{ resource_group_name }}" - storage_account: "{{ storage_account_name }}" - uri: "https://{{ storage_account_name }}.blob.core.windows.net/{{ storage_container_name }}/{{ storage_blob_name }}" diff --git a/fedora_image_uploader/__init__.py b/fedora_image_uploader/__init__.py new file mode 100644 index 0000000..b125f67 --- /dev/null +++ b/fedora_image_uploader/__init__.py 
@@ -0,0 +1,6 @@ +import os + +PLAYBOOKS = os.path.abspath(os.path.join(os.path.dirname(__file__), "playbooks/")) +__version__ = "0.3.0" + +from .handler import Uploader # noqa: F401, E402 diff --git a/fedora_image_uploader/cli.py b/fedora_image_uploader/cli.py new file mode 100644 index 0000000..587e48b --- /dev/null +++ b/fedora_image_uploader/cli.py @@ -0,0 +1,16 @@ +import json + +import click +from fedora_messaging import config, message + +from . import handler + + +@click.command() +@click.argument("message_file", type=click.File("r")) +def main(message_file): + """Process AMQP messages from a file, rather than connecting to the broker.""" + config.conf.setup_logging() + msg = message.load_message(json.load(message_file)) + consumer = handler.Uploader() + consumer(msg) diff --git a/fedora_image_uploader/handler.py b/fedora_image_uploader/handler.py new file mode 100644 index 0000000..d2b204e --- /dev/null +++ b/fedora_image_uploader/handler.py @@ -0,0 +1,396 @@ +import datetime +import hashlib +import logging +import lzma +import os +import subprocess +import tempfile +import time +from collections.abc import Iterable + +import ansible_runner +from azure import identity as az_identity +from azure.mgmt.compute import ComputeManagementClient +from fedfind import exceptions as ff_exceptions +from fedfind import helpers as ff_helpers +from fedfind import release as ff_release +from fedora_messaging import config +from fedora_messaging import exceptions as fm_exceptions +from fedora_messaging import message as fm_message +from requests import Session, adapters +from requests.exceptions import RequestException +from urllib3.util import Retry + +from . 
import PLAYBOOKS + +_log = logging.getLogger(__name__) + + +def _run(args: Iterable[str]): + """Run a command and handle errors.""" + _log.debug("image_uploader running command %s", " ".join(args)) + try: + ret = subprocess.run(args, encoding="utf-8", capture_output=True, timeout=7200) + except subprocess.TimeoutExpired: + _log.error("Command: %s timed out after two hours", " ".join(args)) + raise fm_exceptions.Nack() + except OSError as err: + _log.error("Command: %s caused error %s", " ".join(args), err) + raise fm_exceptions.Nack() + if ret.returncode: + _log.error("Command: %s returned %d", " ".join(args), ret.returncode) + _log.error("stdout: %s", ret.stdout) + _log.error("stderr: %s", ret.stderr) + raise fm_exceptions.Nack() + + +class Uploader: + + def __init__(self): + self.conf = config.conf["consumer_config"] + self.requests = Session() + retry_config = Retry(total=5, backoff_factor=1) + self.requests.mount("https://", adapters.HTTPAdapter(max_retries=retry_config)) + self.handlers = (self.handle_azure, self.handle_container) + # tracks the container repos we got images for, for manifest + # creation purposes + self.container_repos = dict() + + def __call__(self, message: fm_message.Message): + """ + Consumes Pungi messages and uploads images from finished composes. + """ + # We only care about finished composes, but we can't filter that out at the + # AMQP topic level. 
+ if message.body.get("status") not in ("FINISHED", "FINISHED_INCOMPLETE"): + return + + try: + compose_id = message.body["compose_id"] + except KeyError: + _log.error("Message body is missing 'compose_id' key!") + return + + try: + ffrel = ff_release.get_release(cid=compose_id) + except ff_exceptions.UnsupportedComposeError: + _log.info("Skipping compose %s as it contains no images", compose_id) + return + # reset for each message + self.container_repos = dict() + try: + for image in ffrel.all_images: + for handler in self.handlers: + handler(image, ffrel) + except fm_exceptions.Nack: + # If we failed to process an image, it's not likely the failure will resolve + # itself in the time it takes to re-queue the message and then consume it again. + # This stops us from hammering the broker and spinning as fast as we can. + time.sleep(60) + raise + + if self.container_repos: + # manifest stuff + for repo in self.container_repos: + for registry in self.conf["container"]["registries"]: + # something like "registry.fedoraproject.org/fedora:40" + regname = f"{registry}/{repo}:{str(ffrel.relnum)}" + targets = [regname] + # we also create aliased manifests for rawhide and + # latest stable + if ffrel.release.lower() == "rawhide": + targets.append(f"{registry}/{repo}:rawhide") + elif ffrel.relnum == ff_helpers.get_current_release(branched=False): + targets.append(f"{registry}/{repo}:latest") + for target in targets: + # wipe the manifest if it exists already + _run(("buildah", "rmi", target)) + # create the manifest with all arches + createargs = ["buildah", "manifest", "create", target] + createargs.extend( + # it's intentional that this is regname not target + f"{regname}-{arch}" + for arch in self.container_repos[repo] + ) + _run(createargs) + # push it + pushargs = ( + "buildah", + "manifest", + "push", + target, + f"docker://{target}", + "--all", + ) + _run(pushargs) + + def download_image(self, image: dict, dest_dir: str, decompress=False) -> str: + """ + Download, 
verify, and optionally decompress the image. + + Args: + image (dict): An image from a `fedfind.release.Release`. + dest_dir (os.PathLike): Where to write the image. + decompress (bool): Whether or not to LZMA decompress the image as it's + downloaded. + """ + image_file_name = os.path.basename(image["path"]) + image_dest = os.path.join(dest_dir, image_file_name) + # Consider using getattr or something to make this work for new checksum + # algorithms. + checksum = hashlib.sha256(usedforsecurity=False) + expected_checksum = image["checksums"]["sha256"] + image_url = image["url"] + decompressor = None + if decompress: + image_dest = os.path.join(dest_dir, image_file_name.removesuffix(".xz")) + decompressor = lzma.LZMADecompressor() + + try: + with self.requests.get(image_url, stream=True, timeout=30) as req: + _log.info(f"Writing {image_url} to {image_dest}") + req.raise_for_status() + with open(image_dest, "wb") as fd: + for chunk in req.iter_content(chunk_size=16 * 1024): + checksum.update(chunk) + if decompressor and chunk: + chunk = decompressor.decompress(chunk) + fd.write(chunk) + except RequestException as e: + _log.error("Failed to download image from %s: %s", image_url, e) + raise fm_exceptions.Nack() + + actual_checksum = checksum.hexdigest().lower() + _log.info("Image downloaded (sha256:%s)", actual_checksum) + if decompressor and (not decompressor.eof or decompressor.unused_data): + _log.error( + "LZMA unexpected state: EOF %s, unused bytes: %d", + decompressor.eof, + len(decompressor.unused_data), + ) + raise fm_exceptions.Nack() + + if actual_checksum != expected_checksum.lower(): + _log.error( + "Image checksum (sha256:%s) does not match expected checksum (sha256:%s)", + actual_checksum, + expected_checksum, + ) + raise fm_exceptions.Nack() + + return image_dest + + def run_playbook(self, playbook: str, variables: dict, workdir: str): + """ + Execute Ansible playbook in workdir using variables. 
+ + Args: + playbook (str): The path of the playbook to execute. + variables (dict): Variables to be used. + workdir (str): The path to the working directory to use. + """ + _log.info("Executing playbook %s", playbook) + result = ansible_runner.interface.run( + playbook=playbook, + timeout=30 * 60, + private_data_dir=workdir, + envvars={ + "ANSIBLE_HOME": workdir, + "ANSIBLE_COLLECTIONS_PATH": "/root/.ansible/collections", + }, + extravars=variables, + ) + if result.rc != 0: + _log.error(f"Playbook failed with return code {result.rc}") + raise fm_exceptions.Nack() + + def handle_azure(self, image: dict, ffrel: ff_release.Release): + """ + Handle Azure images. + """ + if image.get("subvariant") != "Cloud_Base" or image.get("type") != "vhd-compressed": + return + if image["arch"] not in ("x86_64", "aarch64"): + # unsupported arch + return + if ffrel.relnum < 40: + # images prior to F40 aren't supported + return + + with tempfile.TemporaryDirectory() as workdir: + image_path = self.download_image(image, workdir, decompress=True) + # Generate variables + if hasattr(ffrel, "label") and ffrel.label: + # These are in the format {milestone}-X.Z + (y_release, z_release) = ffrel.label.split("-")[1].split(".") + image_suffix = ( + ffrel.release + if ffrel.label.lower().startswith("rc") + else f"{ffrel.release}-Prerelease" + ) + else: + y_release = ffrel.metadata["composeinfo"]["payload"]["compose"]["date"] + z_release = ffrel.metadata["composeinfo"]["payload"]["compose"]["respin"] + image_suffix = ( + ffrel.release + if ffrel.release.lower() == "rawhide" + else f"{ffrel.release}-Prerelease" + ) + gallery_image_name = f"Fedora-Cloud-{image_suffix}" + image_version = f"{ffrel.relnum}.{y_release}.{z_release}" + eol = ffrel.eol + if not eol and ffrel.release.lower() != "rawhide": + # It's probably a pre-release or GA image. We can reasonably guess + # EOL will be at _least_ a year. Better to under-promise and over-deliver. 
+ eol = datetime.datetime.today() + datetime.timedelta(days=365) + eol = eol.strftime("%Y-%m-%d") + + variables = { + "location": self.conf["azure"]["location"], + "resource_group_name": self.conf["azure"]["resource_group_name"], + "gallery_name": self.conf["azure"]["gallery_name"], + "gallery_description": self.conf["azure"]["gallery_description"], + "storage_account_name": self.conf["azure"]["storage_account_name"], + "storage_container_name": self.conf["azure"]["storage_container_name"], + "target_regions": self.conf["azure"]["target_regions"], + "architecture": {"x86_64": "x64", "aarch64": "Arm64"}[image["arch"]], + "image_source": image_path, + "gallery_image_name": gallery_image_name, + "gallery_image_version": image_version, + "end_of_life_date": eol, + "exclude_from_latest": True, + "ansible_remote_tmp": workdir, + } + playbook = os.path.join(PLAYBOOKS, "azure.yml") + self.run_playbook(playbook, variables, workdir) + try: + self.azure_cleanup_images() + except Exception: + _log.exception("Unable to clean up Azure images") + + def azure_cleanup_images(self): + """ + Remove old images from the Azure Compute Gallery. 
+ """ + subscription = os.environ["AZURE_SUBSCRIPTION_ID"] + creds = az_identity.ClientSecretCredential( + tenant_id=os.environ["AZURE_TENANT"], + client_id=os.environ["AZURE_CLIENT_ID"], + client_secret=os.environ["AZURE_SECRET"], + ) + compute_client = ComputeManagementClient( + credential=creds, + subscription_id=subscription, + api_version="2023-07-03", + ) + + resource_group = self.conf["azure"]["resource_group_name"] + gallery_name = self.conf["azure"]["gallery_name"] + _log.info("Querying image definitions in gallery %s", gallery_name) + for image_definition in compute_client.gallery_images.list_by_gallery( + resource_group_name=resource_group, gallery_name=gallery_name + ): + end_of_life_images = [] + excluded_images = [] + _log.info("Querying image versions in definition %s", image_definition.name) + image_versions = list( + compute_client.gallery_image_versions.list_by_gallery_image( + resource_group_name=resource_group, + gallery_name=gallery_name, + gallery_image_name=image_definition.name, + ) + ) + for image_version in image_versions: + if ( + image_version.publishing_profile.exclude_from_latest + or "rawhide" in image_definition.name.lower() + ): + excluded_images.append(image_version) + if ( + image_version.publishing_profile.end_of_life_date + and image_version.publishing_profile.end_of_life_date + < datetime.datetime.now(datetime.UTC) + ): + end_of_life_images.append(image_version) + excluded_images.sort( + key=lambda version: version.publishing_profile.published_date, reverse=True + ) + + _log.info( + "Removing %d out of %d images from %s", + max(len(excluded_images) - 7, 0), + len(excluded_images), + image_definition.name, + ) + # Save the latest week of images that have been excluded from latest + for image in excluded_images[7:]: + compute_client.gallery_image_versions.begin_delete( + resource_group, gallery_name, image_definition.name, image.name + ) + _log.info( + f"Deleted image {image.name} (excluded from latest) from " + 
f"{image_definition.name} since 7 newer versions exist" + ) + for image in end_of_life_images: + compute_client.gallery_image_versions.begin_delete( + resource_group, gallery_name, image_definition.name, image.name + ) + _log.info( + f"Deleted image {image.name} from {image_definition.name} " + "since the image is end-of-life" + ) + + if len(image_versions) == 0: + compute_client.gallery_images.begin_delete( + resource_group, gallery_name, image_definition.name + ) + _log.info( + f"Deleted image definition {image_definition.name} since it has no versions" + ) + + def handle_container(self, image: dict, ffrel: ff_release.Release): + """Handle container images.""" + registries = self.conf.get("container", {}).get("registries") + if not registries: + # we can't do anything if no registries are configured + return + if image["type"] not in ("docker", "ociarchive"): + # not a known container image type + return + repos = { + "Container_Toolbox": "fedora-toolbox", + "Container_Minimal_Base": "fedora-minimal", + "Container_Base": "fedora", + "Silverblue": "fedora-silverblue", + "Kinoite": "fedora-kinoite", + "Onyx": "fedora-onyx", + "Sericea": "fedora-sericea", + } + repo = repos.get(image["subvariant"]) + if not repo: + _log.debug("Unknown subvariant %s", image["subvariant"]) + return + if image["type"] == "docker" and ffrel.relnum < 40: + # these are actual docker archive images + imgformat = "docker-archive" + else: + # all others are OCI archives; F40+ .oci.tar.xz images + # with type "docker" are xz-compressed OCI archives, + # .ociarchive images with type "ociarchive" are non- + # compressed OCI archives + imgformat = "oci-archive" + arch = image["arch"] + with tempfile.TemporaryDirectory() as workdir: + image_path = self.download_image(image, workdir, decompress=True) + for registry in registries: + args = [ + "skopeo", + "copy", + f"{imgformat}:{image_path}", + f"docker://{registry}/{repo}:{str(ffrel.relnum)}-{arch}", + ] + _run(args) + if repo in 
self.container_repos: + self.container_repos[repo].append(arch) + else: + self.container_repos[repo] = [arch] diff --git a/fedora_image_uploader/playbooks/azure.yml b/fedora_image_uploader/playbooks/azure.yml new file mode 100644 index 0000000..c6215f1 --- /dev/null +++ b/fedora_image_uploader/playbooks/azure.yml @@ -0,0 +1,165 @@ +# The user can either have already authenticated with the CLI (e.g. az login), +# or provide credentials via environment variables. The following environment +# variables are used by the azure collection: +# AZURE_PROFILE +# AZURE_SUBSCRIPTION_ID +# AZURE_CLIENT_ID +# AZURE_SECRET +# AZURE_TENANT +# AZURE_AD_USER +# AZURE_PASSWORD +# AZURE_CLOUD_ENVIRONMENT +# AZURE_CERT_VALIDATION_MODE +# AZURE_ADFS_AUTHORITY_URL +# AZURE_X509_CERTIFICATE_PATH +# AZURE_THUMBPRINT +# +# For example, if you added an app registration, created a client secret for it, and added it to a subscription's +# access control with enough privileges ("Contributor" role is over-broad, but enough for testing) to your +# subscription, you could provide the AZURE_SECRET, AZURE_TENANT, AZURE_CLIENT_ID, and AZURE_SUBSCRIPTION_ID +# variables to authenticate. +# +# The caller must define the following variables: +# architecture: The architecture of the image being uploaded; one of "x64" and "Arm64". +# image_source: The path on the local filesystem where the image is stored. +# gallery_image_name: Name of an image definition, which contains image versions. +# gallery_image_version: Image version; must be in Major.Minor.Patch format, each within a 32-bit integer range. +# end_of_life_date: ISO-8601 format date indicating when the operating system reaches end-of-life. Can be null. +# exclude_from_latest: boolean to indicate whether this should be marked as the latest image. +# If true, VMs deployed from the image definition rather than a specific +# version will use this version.
+--- +- name: Create Fedora Azure marketplace image + hosts: localhost + vars: + # The Azure Compute Gallery name. + # + # Must be letters, numbers, underscores, and periods. Cannot begin or end with underscores or periods. + gallery_name: Fedora + gallery_description: | + The Fedora compute gallery. + gallery_image_name: "40" + release_note_uri: "https://docs.fedoraproject.org/en-US/fedora/f40/release-notes/" + privacy_statement_uri: "https://docs.fedoraproject.org/en-US/legal/privacy/" + + # List of dictionaries describing the replication rules for each Azure region. + target_regions: + - name: eastus2 + regional_replica_count: 2 + # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview#types-of-storage-accounts + storage_account_type: Standard_LRS + + # The Azure region all resources are deployed in. + location: eastus2 + resource_group_name: fedora-ansible-test + # Must be between 3 and 24 characters, numbers and lowercase letters only, globally unique + # + # https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview#storage-account-name + storage_account_name: fedoraansibletest + # Container for a collection of blobs. + # + # Must be lowercase letters, numbers, and hyphens. Must begin with a letter or number. + # Must be between 3 and 63 characters long. 
+ storage_container_name: fedora-vhds + + tasks: + + - name: Create a resource group for everything + azure.azcollection.azure_rm_resourcegroup: + name: "{{ resource_group_name }}" + location: "{{ location }}" + state: present + + - name: Create a storage account for image uploads + azure.azcollection.azure_rm_storageaccount: + resource_group: "{{ resource_group_name }}" + name: "{{ storage_account_name }}" + type: Standard_ZRS + kind: "StorageV2" + + - name: Create a container in the storage account for images + azure.azcollection.azure_rm_storageblob: + resource_group: "{{ resource_group_name }}" + storage_account_name: "{{ storage_account_name }}" + container: "{{ storage_container_name }}" + state: present + + - name: Checksum local file + ansible.builtin.stat: + path: "{{ image_source }}" + checksum_algorithm: sha256 + get_checksum: true + register: local_image_file + + - name: Set storage_blob_name to .vhd + ansible.builtin.set_fact: + storage_blob_name: "{{ local_image_file.stat.checksum }}.vhd" + + - name: Upload the image to blob storage + azure.azcollection.azure_rm_storageblob: + resource_group: "{{ resource_group_name }}" + storage_account_name: "{{ storage_account_name }}" + container: "{{ storage_container_name }}" + blob_name: "{{ storage_blob_name }}" + blob_type: page + source: "{{ image_source }}" + + # The ansible module doesn't include settings for sharing the gallery publicly + # so for the real thing that probably should be done manually (once). This is + # fine for local testing. 
+ - name: Ensure the gallery exists + azure.azcollection.azure_rm_gallery: + resource_group: "{{ resource_group_name }}" + name: "{{ gallery_name }}" + location: "{{ location }}" + description: "{{ gallery_description }}" + + - name: Create or update gallery image + azure.azcollection.azure_rm_galleryimage: + resource_group: "{{ resource_group_name }}" + gallery_name: "{{ gallery_name }}" + name: "{{ gallery_image_name }}-{{ architecture }}" + release_note_uri: "{{ release_note_uri }}" + privacy_statement_uri: "{{ privacy_statement_uri }}" + location: "{{ location }}" + hypervgeneration: "V2" + architecture: "{{ architecture }}" + os_type: linux + os_state: generalized + identifier: + publisher: Fedora + offer: Cloud + sku: "{{ gallery_image_name }}-{{ architecture }}" + recommended: + memory: + min: 1 + v_cpus: + min: 1 + # See CONFIG_NR_CPUS + max: 8192 + features: + - name: SecurityType + value: TrustedLaunchSupported + - name: IsAcceleratedNetworkSupported + value: true + - name: DiskControllerTypes + value: NVMe,SCSI + + - name: Create a gallery image version from the VHD in blob storage + azure.azcollection.azure_rm_galleryimageversion: + resource_group: "{{ resource_group_name }}" + gallery_name: "{{ gallery_name }}" + gallery_image_name: "{{ gallery_image_name }}-{{ architecture }}" + name: "{{ gallery_image_version }}" + location: "{{ location }}" + publishing_profile: + end_of_life_date: "{{ end_of_life_date }}" + exclude_from_latest: "{{ exclude_from_latest }}" + target_regions: "{{ target_regions }}" + storage_profile: + os_disk: + host_caching: "ReadOnly" + source: + resource_group: "{{ resource_group_name }}" + storage_account: "{{ storage_account_name }}" + uri: "https://{{ storage_account_name }}.blob.core.windows.net/{{ storage_container_name }}/{{ storage_blob_name }}" diff --git a/pyproject.toml b/pyproject.toml index 7f5be43..c6f9b4d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,8 +3,8 @@ requires = ["hatchling"] build-backend = 
"hatchling.build" [project] -name = "fedora-cloud-image-uploader" -description = "An AMQP message consumer that uploads Fedora images to clouds." +name = "fedora-image-uploader" +description = "An AMQP message consumer that uploads Fedora images to remote registries." readme = "README.md" license = {file = "LICENSE"} dynamic = ["version"] @@ -86,10 +86,10 @@ test = [ ] [project.scripts] -fedora-cloud-image-uploader = "fedora_cloud_image_uploader.cli:main" +fedora-image-uploader = "fedora_image_uploader.cli:main" [tool.hatch.version] -path = "fedora_cloud_image_uploader/__init__.py" +path = "fedora_image_uploader/__init__.py" [tool.black] line-length = 100 @@ -99,5 +99,5 @@ profile = "black" [tool.coverage.run] source = [ - "fedora_cloud_image_uploader/", + "fedora_image_uploader/", ] diff --git a/tests/test_handler.py b/tests/test_handler.py index 7cefed2..63638a0 100644 --- a/tests/test_handler.py +++ b/tests/test_handler.py @@ -16,8 +16,8 @@ from azure.mgmt.compute.v2023_07_03.models import ( from fedora_messaging import config, exceptions, message from freezegun import freeze_time -from fedora_cloud_image_uploader import PLAYBOOKS, Uploader -from fedora_cloud_image_uploader.handler import _run +from fedora_image_uploader import PLAYBOOKS, Uploader +from fedora_image_uploader.handler import _run # disable fedfind caching, as it can cause things to be left out of # pyvcr cassettes @@ -37,10 +37,10 @@ def _mock_download_image(self, image: dict, dest_dir: str, decompress=False) -> @pytest.mark.vcr @mock.patch( - "fedora_cloud_image_uploader.handler.Uploader.download_image", + "fedora_image_uploader.handler.Uploader.download_image", lambda a, b, c, decompress: f"/test/{os.path.basename(b['path'].removesuffix('.xz'))}", ) -@mock.patch("fedora_cloud_image_uploader.handler.ansible_runner") +@mock.patch("fedora_image_uploader.handler.ansible_runner") @pytest.mark.parametrize( "compose", [ @@ -76,7 +76,7 @@ def test_gallery_name(mock_runner, fixtures_dir, azure_fm_conf, 
compose): {"container": {"registries": ["registry.fedoraproject.org", "quay.io/fedora"]}}, ) @mock.patch( - "fedora_cloud_image_uploader.handler.Uploader.download_image", + "fedora_image_uploader.handler.Uploader.download_image", lambda a, b, c, decompress: f"/test/{os.path.basename(b['path'].removesuffix('.xz'))}", ) @mock.patch("subprocess.run") @@ -177,7 +177,7 @@ def test_containers(mock_subrun, fixtures_dir, compose): @pytest.mark.vcr @mock.patch.dict(config.conf["consumer_config"], {"container": {}}) @mock.patch("subprocess.run") -@mock.patch("fedora_cloud_image_uploader.handler.Uploader.download_image") +@mock.patch("fedora_image_uploader.handler.Uploader.download_image") def test_containers_registries_not_configured(mock_dl, mock_run, fixtures_dir): """ Test we correctly skip container handling if registries are not @@ -201,10 +201,10 @@ def test_containers_registries_not_configured(mock_dl, mock_run, fixtures_dir): {"container": {"registries": ["registry.fedoraproject.org", "quay.io/fedora"]}}, ) @mock.patch( - "fedora_cloud_image_uploader.handler.Uploader.download_image", + "fedora_image_uploader.handler.Uploader.download_image", lambda a, b, c, decompress: f"/test/{os.path.basename(b['path'].removesuffix('.xz'))}", ) -@mock.patch("fedora_cloud_image_uploader.handler.ansible_runner") +@mock.patch("fedora_image_uploader.handler.ansible_runner") def test_old_unsupported_azure_compose(mock_runner, azure_fm_conf, fixtures_dir): mock_runner.interface.run.return_value.rc = 0 with open(os.path.join(fixtures_dir, "messages/unsupported_for_azure.json")) as fd: @@ -215,7 +215,7 @@ def test_old_unsupported_azure_compose(mock_runner, azure_fm_conf, fixtures_dir) assert mock_runner.interface.run.call_count == 0 -@mock.patch("fedora_cloud_image_uploader.handler.ansible_runner") +@mock.patch("fedora_image_uploader.handler.ansible_runner") @pytest.mark.vcr def test_eol_synthesis(mock_runner, fixtures_dir, azure_fm_conf): mock_runner.interface.run.return_value.rc = 0 @@ 
-248,7 +248,7 @@ def test_ansible_fail(mock_run, caplog): @mock.patch.dict(config.conf["consumer_config"], {"azure": {}}) -@mock.patch("fedora_cloud_image_uploader.handler.ansible_runner") +@mock.patch("fedora_image_uploader.handler.ansible_runner") def test_azure_filters(mock_runner): """Test the cases where AzureHandler should decide not to handle.""" ffrel = mock.MagicMock() @@ -269,7 +269,7 @@ def test_azure_filters(mock_runner): assert mock_runner.call_count == 0 -@mock.patch("fedora_cloud_image_uploader.handler.ansible_runner") +@mock.patch("fedora_image_uploader.handler.ansible_runner") def test_non_handled_messages(mock_runner, fixtures_dir, caplog): """ Test we correctly exit early on messages for non-finished composes, @@ -354,7 +354,7 @@ def test_download_image(): ret = consumer.download_image(image, tempdir, decompress=True) -@mock.patch("fedora_cloud_image_uploader.handler.ComputeManagementClient", autospec=True) +@mock.patch("fedora_image_uploader.handler.ComputeManagementClient", autospec=True) def test_azure_old_excluded_images(mock_az_client, azure_env_vars, azure_fm_conf): """ Test the image cleanup policy for removing all images excluded from latest except the @@ -387,7 +387,7 @@ def test_azure_old_excluded_images(mock_az_client, azure_env_vars, azure_fm_conf assert expected_calls == actual_calls -@mock.patch("fedora_cloud_image_uploader.handler.ComputeManagementClient", autospec=True) +@mock.patch("fedora_image_uploader.handler.ComputeManagementClient", autospec=True) def test_azure_end_of_life(mock_az_client, azure_env_vars, azure_fm_conf): """Test the image cleanup policy for removing end-of-life images.""" @@ -418,7 +418,7 @@ def test_azure_end_of_life(mock_az_client, azure_env_vars, azure_fm_conf): assert expected_calls == actual_calls -@mock.patch("fedora_cloud_image_uploader.handler.ComputeManagementClient", autospec=True) +@mock.patch("fedora_image_uploader.handler.ComputeManagementClient", autospec=True) def 
test_azure_empty_definitions(mock_az_client, azure_env_vars, azure_fm_conf): """ Test the image cleanup policy for definitions with no more images. @@ -439,7 +439,7 @@ def test_azure_empty_definitions(mock_az_client, azure_env_vars, azure_fm_conf): ) -@mock.patch("fedora_cloud_image_uploader.handler.ComputeManagementClient", autospec=True) +@mock.patch("fedora_image_uploader.handler.ComputeManagementClient", autospec=True) def test_azure_old_included_images(mock_az_client, azure_env_vars, azure_fm_conf): """ Test the image cleanup policy keeps all images included in latest that aren't EOL @@ -466,7 +466,7 @@ def test_azure_old_included_images(mock_az_client, azure_env_vars, azure_fm_conf assert client.gallery_image_versions.begin_delete.call_count == 0 -@mock.patch("fedora_cloud_image_uploader.handler.ComputeManagementClient", autospec=True) +@mock.patch("fedora_image_uploader.handler.ComputeManagementClient", autospec=True) def test_azure_rawhide_images(mock_az_client, azure_env_vars, azure_fm_conf): """ Test the image cleanup policy keeps only 7 rawhide images even if included in latest