From 60da27dafd72ec005b07bcee93295100953269e2 Mon Sep 17 00:00:00 2001
From: Aine Riordan <44700011+ariordan-redhat@users.noreply.github.com>
Date: Thu, 24 Jul 2025 21:51:11 +0100
Subject: [PATCH 01/71] Update release attribute and docinfo to 2.6 (#3902)
(#3903)
---
downstream/attributes/attributes.adoc | 4 ++--
downstream/titles/aap-containerized-install/docinfo.xml | 2 +-
downstream/titles/aap-hardening/docinfo.xml | 2 +-
downstream/titles/aap-installation-guide/docinfo.xml | 2 +-
downstream/titles/aap-migration/docinfo.xml | 2 +-
downstream/titles/aap-operations-guide/docinfo.xml | 2 +-
downstream/titles/aap-operator-backup/docinfo.xml | 2 +-
downstream/titles/aap-operator-installation/docinfo.xml | 2 +-
downstream/titles/aap-planning-guide/docinfo.xml | 2 +-
downstream/titles/aap-plugin-rhdh-install/docinfo.xml | 2 +-
downstream/titles/analytics/docinfo.xml | 2 +-
downstream/titles/automation-mesh/docinfo.xml | 2 +-
downstream/titles/builder/docinfo.xml | 2 +-
downstream/titles/central-auth/docinfo.xml | 2 +-
.../titles/controller/controller-admin-guide/docinfo.xml | 2 +-
.../titles/controller/controller-api-overview/docinfo.xml | 2 +-
.../titles/controller/controller-user-guide/docinfo.xml | 2 +-
downstream/titles/develop-automation-content/docinfo.xml | 2 +-
downstream/titles/eda/eda-user-guide/docinfo.xml | 2 +-
.../titles/edge-manager/edge-manager-user-guide/docinfo.xml | 2 +-
downstream/titles/getting-started/docinfo.xml | 2 +-
downstream/titles/hub/managing-content/docinfo.xml | 2 +-
downstream/titles/navigator-guide/docinfo.xml | 2 +-
downstream/titles/ocp_performance_guide/docinfo.xml | 2 +-
downstream/titles/operator-mesh/docinfo.xml | 2 +-
.../titles/playbooks/playbooks-getting-started/docinfo.xml | 2 +-
downstream/titles/playbooks/playbooks-reference/docinfo.xml | 2 +-
downstream/titles/release-notes/docinfo.xml | 2 +-
downstream/titles/security-guide/docinfo.xml | 2 +-
downstream/titles/self-service-install/docinfo.xml | 2 +-
downstream/titles/self-service-using/docinfo.xml | 2 +-
.../terraform-aap/terraform-aap-getting-started/docinfo.xml | 2 +-
downstream/titles/topologies/docinfo.xml | 2 +-
downstream/titles/troubleshooting-aap/docinfo.xml | 2 +-
downstream/titles/upgrade/docinfo.xml | 2 +-
35 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/downstream/attributes/attributes.adoc b/downstream/attributes/attributes.adoc
index ef0f2df9c5..c37f2ff991 100644
--- a/downstream/attributes/attributes.adoc
+++ b/downstream/attributes/attributes.adoc
@@ -6,13 +6,13 @@
:AAPCentralAuth: Ansible Automation Platform Central Authentication
:CentralAuthStart: Central authentication
:CentralAuth: central authentication
-:PlatformVers: 2.5
+:PlatformVers: 2.6
:PostgresVers: PostgreSQL 15
//The ansible-core version used to install AAP
:CoreInstVers: 2.14
//The ansible-core version used by the AAP control plane and EEs
:CoreUseVers: 2.16
-:PlatformDownloadUrl: https://access.redhat.com/downloads/content/480/ver=2.5/rhel---9/2.5/x86_64/product-software
+:PlatformDownloadUrl: https://access.redhat.com/downloads/content/480/ver=2.6/rhel---9/2.6/x86_64/product-software
:BaseURL: https://docs.redhat.com/en/documentation
:VMBase: VM-based installation
:Installer: installation program
diff --git a/downstream/titles/aap-containerized-install/docinfo.xml b/downstream/titles/aap-containerized-install/docinfo.xml
index 47f1005992..c9143dc17b 100644
--- a/downstream/titles/aap-containerized-install/docinfo.xml
+++ b/downstream/titles/aap-containerized-install/docinfo.xml
@@ -1,6 +1,6 @@
Containerized installation
Red Hat Ansible Automation Platform
-2.5
+2.6
Install the containerized version of Ansible Automation Platform
This guide helps you to understand the installation requirements and processes behind our containerized version of Ansible Automation Platform.
diff --git a/downstream/titles/aap-hardening/docinfo.xml b/downstream/titles/aap-hardening/docinfo.xml
index a5430ad33c..c6256b39f1 100644
--- a/downstream/titles/aap-hardening/docinfo.xml
+++ b/downstream/titles/aap-hardening/docinfo.xml
@@ -1,6 +1,6 @@
Hardening and compliance
Red Hat Ansible Automation Platform
-2.5
+2.6
Install, configure, and maintain Ansible Automation Platform running on Red Hat Enterprise Linux in a secure manner
This guide provides recommended practices for various processes needed to install, configure, and maintain {PlatformNameShort} on Red Hat Enterprise Linux in a secure manner.
diff --git a/downstream/titles/aap-installation-guide/docinfo.xml b/downstream/titles/aap-installation-guide/docinfo.xml
index 1153b284cc..621715bbf9 100644
--- a/downstream/titles/aap-installation-guide/docinfo.xml
+++ b/downstream/titles/aap-installation-guide/docinfo.xml
@@ -1,6 +1,6 @@
RPM installation
Red Hat Ansible Automation Platform
-2.5
+2.6
Install the RPM version of Ansible Automation Platform
This guide shows you how to install Red Hat Ansible Automation Platform based on supported installation scenarios.
diff --git a/downstream/titles/aap-migration/docinfo.xml b/downstream/titles/aap-migration/docinfo.xml
index b307c93a17..fb4aaab677 100644
--- a/downstream/titles/aap-migration/docinfo.xml
+++ b/downstream/titles/aap-migration/docinfo.xml
@@ -1,6 +1,6 @@
Ansible Automation Platform migration
Red Hat Ansible Automation Platform
-2.5
+2.6
Migrate your deployment of Ansible Automation Platform from one installation type to another
diff --git a/downstream/titles/aap-operations-guide/docinfo.xml b/downstream/titles/aap-operations-guide/docinfo.xml
index 5ff53ae44c..2c3234d608 100644
--- a/downstream/titles/aap-operations-guide/docinfo.xml
+++ b/downstream/titles/aap-operations-guide/docinfo.xml
@@ -1,6 +1,6 @@
Operating Ansible Automation Platform
Red Hat Ansible Automation Platform
-2.5
+2.6
Post installation configurations to ensure a smooth deployment of Ansible Automation Platform installation
diff --git a/downstream/titles/aap-operator-backup/docinfo.xml b/downstream/titles/aap-operator-backup/docinfo.xml
index 8b76b1f66a..01906ad090 100644
--- a/downstream/titles/aap-operator-backup/docinfo.xml
+++ b/downstream/titles/aap-operator-backup/docinfo.xml
@@ -1,6 +1,6 @@
Backup and recovery for operator environments
Red Hat Ansible Automation Platform
-2.5
+2.6
Safeguard against data loss with backup and recovery of Ansible Automation Platform operator on OpenShift Container Platform
diff --git a/downstream/titles/aap-operator-installation/docinfo.xml b/downstream/titles/aap-operator-installation/docinfo.xml
index 2b16dbedcf..0c50f1a2bb 100644
--- a/downstream/titles/aap-operator-installation/docinfo.xml
+++ b/downstream/titles/aap-operator-installation/docinfo.xml
@@ -1,6 +1,6 @@
Installing on OpenShift Container Platform
Red Hat Ansible Automation Platform
-2.5
+2.6
Install and configure Ansible Automation Platform operator on OpenShift Container Platform
This guide provides procedures and reference information for the supported installation scenarios for the Red Hat Ansible Automation Platform operator on OpenShift Container Platform.
diff --git a/downstream/titles/aap-planning-guide/docinfo.xml b/downstream/titles/aap-planning-guide/docinfo.xml
index 1d7ad68203..663cc79ad1 100644
--- a/downstream/titles/aap-planning-guide/docinfo.xml
+++ b/downstream/titles/aap-planning-guide/docinfo.xml
@@ -1,6 +1,6 @@
Planning your installation
Red Hat Ansible Automation Platform
-2.5
+2.6
Plan for installation of Ansible Automation Platform
diff --git a/downstream/titles/aap-plugin-rhdh-install/docinfo.xml b/downstream/titles/aap-plugin-rhdh-install/docinfo.xml
index 8445ef71f4..9adf1e0c8c 100644
--- a/downstream/titles/aap-plugin-rhdh-install/docinfo.xml
+++ b/downstream/titles/aap-plugin-rhdh-install/docinfo.xml
@@ -1,6 +1,6 @@
Installing Ansible plug-ins for Red Hat Developer Hub
Red Hat Ansible Automation Platform
-2.5
+2.6
Install and configure Ansible plug-ins for Red Hat Developer Hub
This guide describes how to install and configure Ansible plug-ins for Red Hat Developer Hub so that users can learn about Ansible, explore curated collections, and develop automation projects.
diff --git a/downstream/titles/analytics/docinfo.xml b/downstream/titles/analytics/docinfo.xml
index a15b8b438d..39215c69fe 100644
--- a/downstream/titles/analytics/docinfo.xml
+++ b/downstream/titles/analytics/docinfo.xml
@@ -1,6 +1,6 @@
Using automation analytics
Red Hat Ansible Automation Platform
-2.5
+2.6
Evaluate the cost savings associated with automated processes
This guide shows how to use the features of automation analytics to evaluate how automation is deployed across your environments and the savings associated with it.
diff --git a/downstream/titles/automation-mesh/docinfo.xml b/downstream/titles/automation-mesh/docinfo.xml
index 21d82d8834..2ca8c07e96 100644
--- a/downstream/titles/automation-mesh/docinfo.xml
+++ b/downstream/titles/automation-mesh/docinfo.xml
@@ -1,6 +1,6 @@
Automation mesh for VM environments
Red Hat Ansible Automation Platform
-2.5
+2.6
Automate at scale in a cloud-native way
This guide shows how to deploy automation mesh as part of your VM-based Ansible Automation Platform environment.
diff --git a/downstream/titles/builder/docinfo.xml b/downstream/titles/builder/docinfo.xml
index 55235fd4a1..30c1885715 100644
--- a/downstream/titles/builder/docinfo.xml
+++ b/downstream/titles/builder/docinfo.xml
@@ -1,6 +1,6 @@
Creating and using execution environments
Red Hat Ansible Automation Platform
-2.5
+2.6
Create and use execution environment containers
This guide shows how to create consistent and reproducible automation execution environments for your Red Hat Ansible Automation Platform.
diff --git a/downstream/titles/central-auth/docinfo.xml b/downstream/titles/central-auth/docinfo.xml
index 8a66af0c12..33a0664ee0 100644
--- a/downstream/titles/central-auth/docinfo.xml
+++ b/downstream/titles/central-auth/docinfo.xml
@@ -1,6 +1,6 @@
Access management and authentication
Red Hat Ansible Automation Platform
-2.5
+2.6
Configure role based access control, authenticators and authenticator maps in Ansible Automation Platform
diff --git a/downstream/titles/controller/controller-admin-guide/docinfo.xml b/downstream/titles/controller/controller-admin-guide/docinfo.xml
index 6e76c749b6..735933cf2a 100644
--- a/downstream/titles/controller/controller-admin-guide/docinfo.xml
+++ b/downstream/titles/controller/controller-admin-guide/docinfo.xml
@@ -1,6 +1,6 @@
Configuring automation execution
Red Hat Ansible Automation Platform
-2.5
+2.6
Learn how to manage, monitor, and use automation controller
This guide shows how to manage automation controller with custom scripts, management jobs, and more.
diff --git a/downstream/titles/controller/controller-api-overview/docinfo.xml b/downstream/titles/controller/controller-api-overview/docinfo.xml
index 7047335f62..5b1885eef3 100644
--- a/downstream/titles/controller/controller-api-overview/docinfo.xml
+++ b/downstream/titles/controller/controller-api-overview/docinfo.xml
@@ -1,6 +1,6 @@
Automation execution API overview
Red Hat Ansible Automation Platform
-2.5
+2.6
Developer overview for the {ControllerName} API
Explore the {ControllerName} API Overview for streamlined automation solutions, empowering developers and administrators with efficient infrastructure management.
diff --git a/downstream/titles/controller/controller-user-guide/docinfo.xml b/downstream/titles/controller/controller-user-guide/docinfo.xml
index 0138758634..3df7c804f2 100644
--- a/downstream/titles/controller/controller-user-guide/docinfo.xml
+++ b/downstream/titles/controller/controller-user-guide/docinfo.xml
@@ -1,6 +1,6 @@
Using automation execution
Red Hat Ansible Automation Platform
-2.5
+2.6
Use automation execution to deploy, define, operate, scale and delegate automation
This guide shows you how to use automation controller to define, operate, scale and delegate automation across your enterprise.
diff --git a/downstream/titles/develop-automation-content/docinfo.xml b/downstream/titles/develop-automation-content/docinfo.xml
index 1473821ead..bab86d6bd8 100644
--- a/downstream/titles/develop-automation-content/docinfo.xml
+++ b/downstream/titles/develop-automation-content/docinfo.xml
@@ -1,6 +1,6 @@
Developing automation content
Red Hat Ansible Automation Platform
-2.5
+2.6
Develop Ansible automation content to run automation jobs
This guide describes how to develop Ansible automation content and how to use it to run automation jobs from Red Hat Ansible Automation Platforms.
diff --git a/downstream/titles/eda/eda-user-guide/docinfo.xml b/downstream/titles/eda/eda-user-guide/docinfo.xml
index 4a544938ec..90c4892452 100644
--- a/downstream/titles/eda/eda-user-guide/docinfo.xml
+++ b/downstream/titles/eda/eda-user-guide/docinfo.xml
@@ -1,6 +1,6 @@
Using automation decisions
Red Hat Ansible Automation Platform
-2.5
+2.6
Configure and use {EDAcontroller} to enhance and expand automation
Learn how to configure your {EDAcontroller} to set up credentials, new projects, decision environments, tokens to authenticate to Ansible Automation Platform Controller, and rulebook activation.
diff --git a/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml b/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml
index d7492d723a..31377ecbfa 100644
--- a/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml
+++ b/downstream/titles/edge-manager/edge-manager-user-guide/docinfo.xml
@@ -1,6 +1,6 @@
Managing device fleets with the Red Hat Edge Manager
Red Hat Ansible Automation Platform
-2.5
+2.6
Install, configure, and use the Red Hat Edge Manager to manage individual and fleets of devices
Learn about components that you can use for scalable and secure edge management.
diff --git a/downstream/titles/getting-started/docinfo.xml b/downstream/titles/getting-started/docinfo.xml
index e1b8436be6..1950768d09 100644
--- a/downstream/titles/getting-started/docinfo.xml
+++ b/downstream/titles/getting-started/docinfo.xml
@@ -1,6 +1,6 @@
Getting started with Ansible Automation Platform
Red Hat Ansible Automation Platform
-2.5
+2.6
Get started with Ansible Automation Platform
This guide shows how to get started with Ansible Automation Platform.
diff --git a/downstream/titles/hub/managing-content/docinfo.xml b/downstream/titles/hub/managing-content/docinfo.xml
index 26b6154d33..efe3b1ce54 100644
--- a/downstream/titles/hub/managing-content/docinfo.xml
+++ b/downstream/titles/hub/managing-content/docinfo.xml
@@ -1,6 +1,6 @@
Managing automation content
Red Hat Ansible Automation Platform
-2.5
+2.6
Create and manage collections, content and repositories in automation hub
This guide shows you how to create, edit, delete, and move content in automation hub.
diff --git a/downstream/titles/navigator-guide/docinfo.xml b/downstream/titles/navigator-guide/docinfo.xml
index 4d80c41c38..318c9fdb5c 100644
--- a/downstream/titles/navigator-guide/docinfo.xml
+++ b/downstream/titles/navigator-guide/docinfo.xml
@@ -1,6 +1,6 @@
Using content navigator
Red Hat Ansible Automation Platform
-2.5
+2.6
Develop content that is compatible with Ansible Automation Platform
diff --git a/downstream/titles/ocp_performance_guide/docinfo.xml b/downstream/titles/ocp_performance_guide/docinfo.xml
index 0771bd481b..aeef0254f4 100644
--- a/downstream/titles/ocp_performance_guide/docinfo.xml
+++ b/downstream/titles/ocp_performance_guide/docinfo.xml
@@ -1,6 +1,6 @@
Performance considerations for operator environments
Red Hat Ansible Automation Platform
-2.5
+2.6
Configure automation controller for improved performance on operator based installations
diff --git a/downstream/titles/operator-mesh/docinfo.xml b/downstream/titles/operator-mesh/docinfo.xml
index d7f34fed3b..3470fce58f 100644
--- a/downstream/titles/operator-mesh/docinfo.xml
+++ b/downstream/titles/operator-mesh/docinfo.xml
@@ -1,6 +1,6 @@
Automation mesh for managed cloud or operator environments
Red Hat Ansible Automation Platform
-2.5
+2.6
Automate at scale in a cloud-native way
This guide shows how to deploy automation mesh as part of your operator-based Ansible Automation Platform environment.
diff --git a/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml b/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml
index 5dc012241f..dae897928d 100644
--- a/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml
+++ b/downstream/titles/playbooks/playbooks-getting-started/docinfo.xml
@@ -1,6 +1,6 @@
Getting started with playbooks
Red Hat Ansible Automation Platform
-2.5
+2.6
Get started with Ansible Playbooks
This guide shows how to create and use playbooks to address your automation requirements.
diff --git a/downstream/titles/playbooks/playbooks-reference/docinfo.xml b/downstream/titles/playbooks/playbooks-reference/docinfo.xml
index 58966318a4..785035a5cd 100644
--- a/downstream/titles/playbooks/playbooks-reference/docinfo.xml
+++ b/downstream/titles/playbooks/playbooks-reference/docinfo.xml
@@ -1,6 +1,6 @@
Reference guide to Ansible Playbooks
Red Hat Ansible Automation Platform
-2.5
+2.6
Learn about the different approaches for creating playbooks
This guide provides a reference for the differing approaches to the creating of Ansible playbooks.
diff --git a/downstream/titles/release-notes/docinfo.xml b/downstream/titles/release-notes/docinfo.xml
index 097f5a79e4..9c946d5e4f 100644
--- a/downstream/titles/release-notes/docinfo.xml
+++ b/downstream/titles/release-notes/docinfo.xml
@@ -1,6 +1,6 @@
Release notes
Red Hat Ansible Automation Platform
-2.5
+2.6
New features, enhancements, and bug fix information
diff --git a/downstream/titles/security-guide/docinfo.xml b/downstream/titles/security-guide/docinfo.xml
index fb847664c8..b769853b80 100644
--- a/downstream/titles/security-guide/docinfo.xml
+++ b/downstream/titles/security-guide/docinfo.xml
@@ -1,6 +1,6 @@
Implementing security automation
Red Hat Ansible Automation Platform
-2.5
+2.6
Identify and manage security events using Ansible
This guide provides procedures for automating and streamlining various security processes needed to identify, triage, and respond to security events using Ansible.
diff --git a/downstream/titles/self-service-install/docinfo.xml b/downstream/titles/self-service-install/docinfo.xml
index 6eae8aa3ed..21a5562a0c 100644
--- a/downstream/titles/self-service-install/docinfo.xml
+++ b/downstream/titles/self-service-install/docinfo.xml
@@ -1,6 +1,6 @@
Installing Ansible Automation Platform self-service technology preview
Red Hat Ansible Automation Platform
-2.5
+2.6
Install and configure Ansible Automation Platform self-service technology preview
This guide describes how to install and configure Ansible Automation Platform self-service technology preview so that users can run automation.
diff --git a/downstream/titles/self-service-using/docinfo.xml b/downstream/titles/self-service-using/docinfo.xml
index 985a3dcc23..be9e80274c 100644
--- a/downstream/titles/self-service-using/docinfo.xml
+++ b/downstream/titles/self-service-using/docinfo.xml
@@ -1,6 +1,6 @@
Using Ansible Automation Platform self-service technology preview
Red Hat Ansible Automation Platform
-2.5
+2.6
Use Ansible Automation Platform self-service technology preview
This guide describes how to use Ansible Automation Platform self-service technology preview to implement role-based access control and run automation.
diff --git a/downstream/titles/terraform-aap/terraform-aap-getting-started/docinfo.xml b/downstream/titles/terraform-aap/terraform-aap-getting-started/docinfo.xml
index 4c420f3afc..9dd6558c9e 100644
--- a/downstream/titles/terraform-aap/terraform-aap-getting-started/docinfo.xml
+++ b/downstream/titles/terraform-aap/terraform-aap-getting-started/docinfo.xml
@@ -1,6 +1,6 @@
Getting started with Terraform and Ansible Automation Platform
Red Hat Ansible Automation Platform
-2.5
+2.6
Integrate Terraform with Ansible Automation Platform
Learn how to configure Ansible Automation Platform with Terraform Enterprise or HCP Terraform, and migrate from Terraform Community.
diff --git a/downstream/titles/topologies/docinfo.xml b/downstream/titles/topologies/docinfo.xml
index e29d8807e5..0ffc6bf4c3 100644
--- a/downstream/titles/topologies/docinfo.xml
+++ b/downstream/titles/topologies/docinfo.xml
@@ -1,6 +1,6 @@
Tested deployment models
Red Hat Ansible Automation Platform
-2.5
+2.6
Plan your deployment of Ansible Automation Platform
diff --git a/downstream/titles/troubleshooting-aap/docinfo.xml b/downstream/titles/troubleshooting-aap/docinfo.xml
index 4375bb5626..97233bf22e 100644
--- a/downstream/titles/troubleshooting-aap/docinfo.xml
+++ b/downstream/titles/troubleshooting-aap/docinfo.xml
@@ -1,6 +1,6 @@
Troubleshooting Ansible Automation Platform
Red Hat Ansible Automation Platform
-2.5
+2.6
Troubleshoot issues with Ansible Automation Platform
diff --git a/downstream/titles/upgrade/docinfo.xml b/downstream/titles/upgrade/docinfo.xml
index 0def8006be..4b483cfa6e 100644
--- a/downstream/titles/upgrade/docinfo.xml
+++ b/downstream/titles/upgrade/docinfo.xml
@@ -1,6 +1,6 @@
RPM upgrade and migration
Red Hat Ansible Automation Platform
-2.5
+2.6
Upgrade and migrate legacy deployments of Ansible Automation Platform
This guide shows you how to upgrade to the latest version of Ansible Automation Platform and migrate legacy virtual environments to automation execution environments.
From b8be4bd525dc09683edbf46b8d492f7aac923fbf Mon Sep 17 00:00:00 2001
From: Aine Riordan <44700011+ariordan-redhat@users.noreply.github.com>
Date: Thu, 24 Jul 2025 22:00:39 +0100
Subject: [PATCH 02/71] 2.6 Add sync scripts for 2.6 (#3904)
---
bin/sync_docs.sh | 19 +++++++++++++++++++
bin/sync_ocp_latest.sh | 13 +++++++++++++
2 files changed, 32 insertions(+)
create mode 100644 bin/sync_docs.sh
create mode 100644 bin/sync_ocp_latest.sh
diff --git a/bin/sync_docs.sh b/bin/sync_docs.sh
new file mode 100644
index 0000000000..6b383e4548
--- /dev/null
+++ b/bin/sync_docs.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+######
+# This script synchronizes content to the downstream repository.
+# A Jenkins job configures the source and target repositories and runs this script directly.
+# If you commit changes to this script you should verify the Jenkins job runs successfully.
+######
+
+# Set the path to the source and target directories.
+# The source directory contains the content that you want to synchronize.
+source=source
+# The target directory is the location where you want to synchronize content.
+target=target
+
+# Clean the existing downstream and release-note folders.
+rm -rf $target/downstream
+
+# Copy the content of the downstream and release-note folders.
+cp -r $source/downstream $target/downstream
diff --git a/bin/sync_ocp_latest.sh b/bin/sync_ocp_latest.sh
new file mode 100644
index 0000000000..217c388759
--- /dev/null
+++ b/bin/sync_ocp_latest.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Set the path to the file that contains the ":OCPLatest:" attribute.
+attributes=../downstream/attributes/attributes.adoc
+
+# Retrieve the OCP attributes file from the OpenShift docs repository.
+curl https://raw.githubusercontent.com/openshift/openshift-docs/main/_attributes/ocp-attributes.adoc -o ocp-attributes.adoc
+# Save the value of the "product-version" attribute as a variable.
+ocpversion=$(sed -n -e 's/^:product-version: //p' ocp-attributes.adoc)
+# Replace the value of the "OCPLatest" attribute with the value of the "product-version" attribute.
+sed -i -e "s/^:OCPLatest:.*/:OCPLatest: $ocpversion/" $attributes
+# Delete the OCP attributes file.
+rm -f ocp-attributes.adoc
From 861eb1f65607c253fccf89540eb713d9934ab17a Mon Sep 17 00:00:00 2001
From: Aine Riordan <44700011+ariordan-redhat@users.noreply.github.com>
Date: Thu, 24 Jul 2025 22:30:52 +0100
Subject: [PATCH 03/71] 2.6: Archive 2.5 updating doc (#3905)
---
.../archived-titles}/updating-aap/docinfo.xml | 0
.../archived-titles}/updating-aap/master.adoc | 2 +-
downstream/titles/updating-aap/aap-common | 1 -
downstream/titles/updating-aap/attributes | 1 -
downstream/titles/updating-aap/images | 1 -
downstream/titles/updating-aap/platform | 1 -
6 files changed, 1 insertion(+), 5 deletions(-)
rename downstream/{titles => archive/archived-titles}/updating-aap/docinfo.xml (100%)
rename downstream/{titles => archive/archived-titles}/updating-aap/master.adoc (92%)
delete mode 120000 downstream/titles/updating-aap/aap-common
delete mode 120000 downstream/titles/updating-aap/attributes
delete mode 120000 downstream/titles/updating-aap/images
delete mode 120000 downstream/titles/updating-aap/platform
diff --git a/downstream/titles/updating-aap/docinfo.xml b/downstream/archive/archived-titles/updating-aap/docinfo.xml
similarity index 100%
rename from downstream/titles/updating-aap/docinfo.xml
rename to downstream/archive/archived-titles/updating-aap/docinfo.xml
diff --git a/downstream/titles/updating-aap/master.adoc b/downstream/archive/archived-titles/updating-aap/master.adoc
similarity index 92%
rename from downstream/titles/updating-aap/master.adoc
rename to downstream/archive/archived-titles/updating-aap/master.adoc
index 27bb46df48..1c695c6539 100644
--- a/downstream/titles/updating-aap/master.adoc
+++ b/downstream/archive/archived-titles/updating-aap/master.adoc
@@ -21,4 +21,4 @@ Upgrades from 2.4 to 2.5 are unsupported at this time. For more information, see
include::platform/assembly-update-rpm.adoc[leveloffset=+1]
include::platform/assembly-update-container.adoc[leveloffset=+1]
-// [hherbly]: moved to Installing on OCP guide per AAP-34122 include::platform/assembly-update-ocp.adoc[leveloffset=+1]
\ No newline at end of file
+// [hherbly]: moved to Installing on OCP guide per AAP-34122 include::platform/assembly-update-ocp.adoc[leveloffset=+1]
diff --git a/downstream/titles/updating-aap/aap-common b/downstream/titles/updating-aap/aap-common
deleted file mode 120000
index 472eeb4dac..0000000000
--- a/downstream/titles/updating-aap/aap-common
+++ /dev/null
@@ -1 +0,0 @@
-../../aap-common
\ No newline at end of file
diff --git a/downstream/titles/updating-aap/attributes b/downstream/titles/updating-aap/attributes
deleted file mode 120000
index a5caaa73a5..0000000000
--- a/downstream/titles/updating-aap/attributes
+++ /dev/null
@@ -1 +0,0 @@
-../../attributes
\ No newline at end of file
diff --git a/downstream/titles/updating-aap/images b/downstream/titles/updating-aap/images
deleted file mode 120000
index 5fa6987088..0000000000
--- a/downstream/titles/updating-aap/images
+++ /dev/null
@@ -1 +0,0 @@
-../../images
\ No newline at end of file
diff --git a/downstream/titles/updating-aap/platform b/downstream/titles/updating-aap/platform
deleted file mode 120000
index 06b49528ee..0000000000
--- a/downstream/titles/updating-aap/platform
+++ /dev/null
@@ -1 +0,0 @@
-../../assemblies/platform
\ No newline at end of file
From b40117f6dfde092b2b98800acbac243956c05afd Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Fri, 25 Jul 2025 09:50:58 +0100
Subject: [PATCH 04/71] Update Containerized installation for AAP 2.6 (#3906)
(#3907)
Update Containerized installation for AAP 2.6
https://issues.redhat.com/browse/AAP-48592
---
...sembly-aap-containerized-installation.adoc | 7 -----
.../proc-downloading-containerized-aap.adoc | 4 +--
...-hub-collection-and-container-signing.adoc | 2 +-
.../platform/proc-restore-aap-container.adoc | 6 ++---
.../platform/proc-update-aap-container.adoc | 4 +--
.../ref-containerized-troubleshoot-ref.adoc | 26 ++-----------------
.../ref-images-inventory-variables.adoc | 22 ++++++++--------
.../snippets/cont-tested-system-config.adoc | 10 ++++---
.../snippets/inventory-cont-a-env-a.adoc | 14 +++++-----
.../snippets/inventory-cont-b-env-a.adoc | 12 ++++-----
10 files changed, 40 insertions(+), 67 deletions(-)
diff --git a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
index 19dc525c7b..1dacd46dc3 100644
--- a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
+++ b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
@@ -10,13 +10,6 @@ ifdef::context[:parent-context: {context}]
This guide helps you to understand the installation requirements and processes behind the containerized version of {PlatformNameShort}.
-[NOTE]
-====
-
-include::snippets/container-upgrades.adoc[]
-
-====
-
== Tested deployment models
Red Hat tests {PlatformNameShort} {PlatformVers} with a defined set of topologies to give you opinionated deployment options. The supported topologies include infrastructure topology diagrams, tested system configurations, example inventory files, and network ports information.
diff --git a/downstream/modules/platform/proc-downloading-containerized-aap.adoc b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
index 5c724c9ec0..0e059a3775 100644
--- a/downstream/modules/platform/proc-downloading-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
@@ -36,13 +36,13 @@ scp -i ansible-automation-platform-containerized-setup-.tar.gz
+$ tar xfvz ansible-automation-platform-containerized-setup-.tar.gz
----
+
.. To unpack the offline or bundled installer:
+
----
-$ tar xfvz ansible-automation-platform-containerized-setup-bundle--.tar.gz
+$ tar xfvz ansible-automation-platform-containerized-setup-bundle--.tar.gz
----
[role="_additional-resources"]
diff --git a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
index 7de1eeacbb..3f6a1e3a27 100644
--- a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
+++ b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
@@ -38,7 +38,7 @@ The algorithm and cipher used is the responsibility of the customer.
.Procedure
-. On a RHEL9 server run the following command to create a new key pair for collection signing:
+. On a RHEL server run the following command to create a new key pair for collection signing:
+
----
gpg --gen-key
diff --git a/downstream/modules/platform/proc-restore-aap-container.adoc b/downstream/modules/platform/proc-restore-aap-container.adoc
index 4ad37c217e..cba4808307 100644
--- a/downstream/modules/platform/proc-restore-aap-container.adoc
+++ b/downstream/modules/platform/proc-restore-aap-container.adoc
@@ -39,7 +39,7 @@ Restoring to a different environment with different hostnames is not recommended
For example:
+
----
-$ cd ansible-automation-platform-containerized-setup-2.5-XX/backups
+$ cd ansible-automation-platform-containerized-setup-/backups
----
+
----
@@ -53,7 +53,7 @@ $ tar tvf gateway_env1-gateway-node1.tar.gz | grep db
For example:
+
----
-$ cd ansible-automation-platform-containerized-setup-2.5-XX/backups
+$ cd ansible-automation-platform-containerized-setup-/backups
----
+
----
@@ -64,7 +64,7 @@ $ mv gateway_env1-gateway-node1.tar.gz gateway_env2-gateway-node1.tar.gz
For example:
+
----
-$ cd ansible-automation-platform-containerized-setup-2.5-XX
+$ cd ansible-automation-platform-containerized-setup-
----
+
----
diff --git a/downstream/modules/platform/proc-update-aap-container.adoc b/downstream/modules/platform/proc-update-aap-container.adoc
index 240528c6ff..7a8b356968 100644
--- a/downstream/modules/platform/proc-update-aap-container.adoc
+++ b/downstream/modules/platform/proc-update-aap-container.adoc
@@ -3,9 +3,7 @@
= Updating containerized {PlatformNameShort}
-Perform a patch update for a {ContainerBase} of {PlatformNameShort} from 2.5 to 2.5.x.
-
-include::snippets/container-upgrades.adoc[]
+Perform an upgrade of containerized {PlatformNameShort}.
.Prerequisites
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
index 0fc287132a..bb56363f43 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
@@ -8,31 +8,9 @@
We use as much of the underlying native {RHEL} technology as possible. Podman is used for the container runtime and management of services.
-Use `podman ps` to list the running containers on the system:
+Use `podman ps` to list the running containers on the system.
-----
-$ podman ps
-
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-88ed40495117 registry.redhat.io/rhel8/postgresql-13:latest run-postgresql 48 minutes ago Up 47 minutes postgresql
-8f55ba612f04 registry.redhat.io/rhel8/redis-6:latest run-redis 47 minutes ago Up 47 minutes redis
-56c40445c590 registry.redhat.io/ansible-automation-platform-24/ee-supported-rhel8:latest /usr/bin/receptor... 47 minutes ago Up 47 minutes receptor
-f346f05d56ee registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 47 minutes ago Up 45 minutes automation-controller-rsyslog
-26e3221963e3 registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 46 minutes ago Up 45 minutes automation-controller-task
-c7ac92a1e8a1 registry.redhat.io/ansible-automation-platform-24/controller-rhel8:latest /usr/bin/launch_a... 46 minutes ago Up 28 minutes automation-controller-web
-----
-
-Use `podman images` to display information about locally stored images:
-
-----
-$ podman images
-
-REPOSITORY TAG IMAGE ID CREATED SIZE
-registry.redhat.io/ansible-automation-platform-24/ee-supported-rhel8 latest b497bdbee59e 10 days ago 3.16 GB
-registry.redhat.io/ansible-automation-platform-24/controller-rhel8 latest ed8ebb1c1baa 10 days ago 1.48 GB
-registry.redhat.io/rhel8/redis-6 latest 78905519bb05 2 weeks ago 357 MB
-registry.redhat.io/rhel8/postgresql-13 latest 9b65bc3d0413 2 weeks ago 765 MB
-----
+Use `podman images` to display information about locally stored images.
Containerized {PlatformNameShort} runs as rootless containers for enhanced security by default. This means you can install containerized {PlatformNameShort} by using any local unprivileged user account. Privilege escalation is only needed for certain root level tasks, and by default is not needed to use root directly.
diff --git a/downstream/modules/platform/ref-images-inventory-variables.adoc b/downstream/modules/platform/ref-images-inventory-variables.adoc
index aa98d67f2e..188b6c8328 100644
--- a/downstream/modules/platform/ref-images-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-images-inventory-variables.adoc
@@ -18,7 +18,7 @@
| `controller_image`
| Container image for {ControllerName}.
| Optional
-| `controller-rhel8:latest`
+| `controller-rhel9:latest`
|
| `de_extra_images`
@@ -30,19 +30,19 @@
| `de_supported_image`
| Supported decision environment container image.
| Optional
-| `de-supported-rhel8:latest`
+| `de-supported-rhel9:latest`
|
| `eda_image`
| Backend container image for {EDAName}.
| Optional
-| `eda-controller-rhel8:latest`
+| `eda-controller-rhel9:latest`
|
| `eda_web_image`
| Front-end container image for {EDAName}.
| Optional
-| `eda-controller-ui-rhel8:latest`
+| `eda-controller-ui-rhel9:latest`
|
| `ee_extra_images`
@@ -54,37 +54,37 @@
| `ee_minimal_image`
| Minimal {ExecEnvShort} container image.
| Optional
-| `ee-minimal-rhel8:latest`
+| `ee-minimal-rhel9:latest`
|
| `ee_supported_image`
| Supported {ExecEnvShort} container image.
| Optional
-| `ee-supported-rhel8:latest`
+| `ee-supported-rhel9:latest`
|
| `gateway_image`
| Container image for {Gateway}.
| Optional
-| `gateway-rhel8:latest`
+| `gateway-rhel9:latest`
|
| `gateway_proxy_image`
| Container image for {Gateway} proxy.
| Optional
-| `gateway-proxy-rhel8:latest`
+| `gateway-proxy-rhel9:latest`
|
| `hub_image`
| Backend container image for {HubName}.
| Optional
-| `hub-rhel8:latest`
+| `hub-rhel9:latest`
|
| `hub_web_image`
| Front-end container image for {HubName}.
| Optional
-| `hub-web-rhel8:latest`
+| `hub-web-rhel9:latest`
|
| `pcp_image`
@@ -102,7 +102,7 @@
| `receptor_image`
| Container image for receptor.
| Optional
-| `receptor-rhel8:latest`
+| `receptor-rhel9:latest`
|
| `redis_image`
diff --git a/downstream/snippets/cont-tested-system-config.adoc b/downstream/snippets/cont-tested-system-config.adoc
index 4e7a812769..8f7c352726 100644
--- a/downstream/snippets/cont-tested-system-config.adoc
+++ b/downstream/snippets/cont-tested-system-config.adoc
@@ -12,7 +12,7 @@ a|
| Operating system
a|
-* {RHEL} 9.2 or later minor versions of {RHEL} 9.
+* {RHEL} 9.4 or later minor versions of {RHEL} 9.
* {RHEL} 10 or later minor versions of {RHEL} 10 for enterprise topologies.
|
@@ -33,7 +33,11 @@ a|
|
| Database
-| {PostgresVers}
-| External (customer supported) databases require ICU support.
+a|
+* For {PlatformNameShort} managed databases: {PostgresVers}
+* For customer provided (external) databases: {PostgresVers}, 16, or 17.
+a|
+* External (customer supported) databases require ICU support.
+* External databases using PostgreSQL 16 or 17 must rely on external backup and restore processes. Backup and restore functionality is dependent on utilities provided with {PostgresVers}.
|====
diff --git a/downstream/snippets/inventory-cont-a-env-a.adoc b/downstream/snippets/inventory-cont-a-env-a.adoc
index d2b52af0ca..8537814066 100644
--- a/downstream/snippets/inventory-cont-a-env-a.adoc
+++ b/downstream/snippets/inventory-cont-a-env-a.adoc
@@ -5,12 +5,12 @@
# This is the {PlatformNameShort} installer inventory file intended for the container growth deployment topology.
# This inventory file expects to be run from the host where {PlatformNameShort} will be installed.
# Consult the {PlatformNameShort} product documentation about this topology's tested hardware configuration.
-# {URLTopologies}/container-topologies
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/container-topologies
#
# Consult the docs if you are unsure what to add
# For all optional variables consult the included README.md
# or the {PlatformNameShort} documentation:
-# {URLContainerizedInstall}
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation
# This section is for your {Gateway} hosts
# -----------------------------------------------------
@@ -42,7 +42,7 @@ aap.example.org
ansible_connection=local
# Common variables
-# {URLContainerizedInstall}/appendix-inventory-files-vars#general-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#general-variables
# -----------------------------------------------------
postgresql_admin_username=postgres
postgresql_admin_password=
@@ -53,14 +53,14 @@ registry_password=
redis_mode=standalone
# {GatewayStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#platform-gateway-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#platform-gateway-variables
# -----------------------------------------------------
gateway_admin_password=
gateway_pg_host=aap.example.org
gateway_pg_password=
# {ControllerNameStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#controller-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#controller-variables
# -----------------------------------------------------
controller_admin_password=
controller_pg_host=aap.example.org
@@ -68,7 +68,7 @@ controller_pg_password=
controller_percent_memory_capacity=0.5
# {HubNameStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#hub-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#hub-variables
# -----------------------------------------------------
hub_admin_password=
hub_pg_host=aap.example.org
@@ -76,7 +76,7 @@ hub_pg_password=
hub_seed_collections=false
# {EDAcontroller}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#event-driven-ansible-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#event-driven-ansible-variables
# -----------------------------------------------------
eda_admin_password=
eda_pg_host=aap.example.org
diff --git a/downstream/snippets/inventory-cont-b-env-a.adoc b/downstream/snippets/inventory-cont-b-env-a.adoc
index fe8c9c8ff5..2cd515390e 100644
--- a/downstream/snippets/inventory-cont-b-env-a.adoc
+++ b/downstream/snippets/inventory-cont-b-env-a.adoc
@@ -6,7 +6,7 @@
# Consult the docs if you are unsure what to add
# For all optional variables consult the included README.md
# or the Red Hat documentation:
-# {URLContainerizedInstall}
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation
# This section is for your {Gateway} hosts
# -----------------------------------------------------
@@ -50,7 +50,7 @@ eda2.example.org
[all:vars]
# Common variables
-# {URLContainerizedInstall}/appendix-inventory-files-vars#general-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#general-variables
# -----------------------------------------------------
postgresql_admin_username=
postgresql_admin_password=
@@ -58,7 +58,7 @@ registry_username=
registry_password=
# {GatewayStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#platform-gateway-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#platform-gateway-variables
# -----------------------------------------------------
gateway_admin_password=
gateway_pg_host=externaldb.example.org
@@ -67,7 +67,7 @@ gateway_pg_username=
gateway_pg_password=
# {ControllerNameStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#controller-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#controller-variables
# -----------------------------------------------------
controller_admin_password=
controller_pg_host=externaldb.example.org
@@ -76,7 +76,7 @@ controller_pg_username=
controller_pg_password=
# {HubNameStart}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#hub-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#hub-variables
# -----------------------------------------------------
hub_admin_password=
hub_pg_host=externaldb.example.org
@@ -85,7 +85,7 @@ hub_pg_username=
hub_pg_password=
# {EDAcontroller}
-# {URLContainerizedInstall}/appendix-inventory-files-vars#event-driven-ansible-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/appendix-inventory-files-vars#event-driven-ansible-variables
# -----------------------------------------------------
eda_admin_password=
eda_pg_host=externaldb.example.org
From 0cbc9c8db2ba9e0d8dd94c6c259f4740887e77ff Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Fri, 25 Jul 2025 14:09:08 +0100
Subject: [PATCH 05/71] Create AAP 2.6 inventory files for Tested deployment
models (#3912) (#3913)
Create AAP 2.6 inventory files for Tested deployment models
https://issues.redhat.com/browse/AAP-47926
---
downstream/snippets/inventory-rpm-a-env-a.adoc | 16 ++++++++--------
downstream/snippets/inventory-rpm-b-env-a.adoc | 14 +++++++-------
2 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/downstream/snippets/inventory-rpm-a-env-a.adoc b/downstream/snippets/inventory-rpm-a-env-a.adoc
index 0b3d680733..aa01e2baba 100644
--- a/downstream/snippets/inventory-rpm-a-env-a.adoc
+++ b/downstream/snippets/inventory-rpm-a-env-a.adoc
@@ -4,11 +4,11 @@
----
# This is the {PlatformNameShort} installer inventory file intended for the RPM growth deployment topology.
# Consult the {PlatformNameShort} product documentation about this topology's tested hardware configuration.
-# {URLTopologies}/rpm-topologies
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/rpm-topologies
#
# Consult the docs if you are unsure what to add
# For all optional variables consult the {PlatformNameShort} documentation:
-# {URLInstallationGuide}
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/index
# This section is for your {Gateway} hosts
@@ -47,7 +47,7 @@ db.example.org
[all:vars]
# Common variables
-# {URLInstallationGuide}/appendix-inventory-files-vars#general-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#general-variables
# -----------------------------------------------------
registry_username=
registry_password=
@@ -55,30 +55,30 @@ registry_password=
redis_mode=standalone
# {GatewayStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#platform-gateway-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#platform-gateway-variables
# -----------------------------------------------------
automationgateway_admin_password=
automationgateway_pg_host=db.example.org
automationgateway_pg_password=
# {ControllerNameStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#controller-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#controller-variables
# -----------------------------------------------------
admin_password=
pg_host=db.example.org
pg_password=
# {HubNameStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#hub-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#hub-variables
# -----------------------------------------------------
automationhub_admin_password=
automationhub_pg_host=db.example.org
automationhub_pg_password=
# {EDAcontroller}
-# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#event-driven-ansible-variables
# -----------------------------------------------------
automationedacontroller_admin_password=
automationedacontroller_pg_host=db.example.org
automationedacontroller_pg_password=
-----
\ No newline at end of file
+----
diff --git a/downstream/snippets/inventory-rpm-b-env-a.adoc b/downstream/snippets/inventory-rpm-b-env-a.adoc
index fce25e63de..c42f3be2be 100644
--- a/downstream/snippets/inventory-rpm-b-env-a.adoc
+++ b/downstream/snippets/inventory-rpm-b-env-a.adoc
@@ -5,7 +5,7 @@
# This is the {PlatformNameShort} enterprise installer inventory file
# Consult the docs if you are unsure what to add
# For all optional variables consult the Red Hat documentation:
-# {URLInstallationGuide}
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/index
# This section is for your {Gateway} hosts
# -----------------------------------------------------
@@ -51,13 +51,13 @@ eda2.example.org
[all:vars]
# Common variables
-# {URLInstallationGuide}/appendix-inventory-files-vars#general-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#general-variables
# -----------------------------------------------------
registry_username=
registry_password=
# {GatewayStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#platform-gateway-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#platform-gateway-variables
# -----------------------------------------------------
automationgateway_admin_password=
automationgateway_pg_host=
@@ -66,7 +66,7 @@ automationgateway_pg_username=
automationgateway_pg_password=
# {ControllerNameStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#controller-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#controller-variables
# -----------------------------------------------------
admin_password=
pg_host=
@@ -75,7 +75,7 @@ pg_username=
pg_password=
# {HubNameStart}
-# {URLInstallationGuide}/appendix-inventory-files-vars#hub-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#hub-variables
# -----------------------------------------------------
automationhub_admin_password=
automationhub_pg_host=
@@ -84,11 +84,11 @@ automationhub_pg_username=
automationhub_pg_password=
# {EDAcontroller}
-# {URLInstallationGuide}/appendix-inventory-files-vars#event-driven-ansible-variables
+# https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/rpm_installation/appendix-inventory-files-vars#event-driven-ansible-variables
# -----------------------------------------------------
automationedacontroller_admin_password=
automationedacontroller_pg_host=
automationedacontroller_pg_database=
automationedacontroller_pg_username=
automationedacontroller_pg_password=
-----
\ No newline at end of file
+----
From 821048932a86596e3b351d39d4489cd0ab157e78 Mon Sep 17 00:00:00 2001
From: ccopelloRH
Date: Fri, 25 Jul 2025 16:07:36 -0400
Subject: [PATCH 06/71] Update docinfo.xml productnumber
Updated 2.4 to 2.6
---
downstream/titles/aap-plugin-rhdh-using/docinfo.xml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/downstream/titles/aap-plugin-rhdh-using/docinfo.xml b/downstream/titles/aap-plugin-rhdh-using/docinfo.xml
index 84f78c8be9..1c812f1a74 100644
--- a/downstream/titles/aap-plugin-rhdh-using/docinfo.xml
+++ b/downstream/titles/aap-plugin-rhdh-using/docinfo.xml
@@ -1,6 +1,6 @@
Using Ansible plug-ins for Red Hat Developer Hub
Red Hat Ansible Automation Platform
-2.4
+2.6
Use Ansible plug-ins for Red Hat Developer Hub
This guide describes how to use Ansible plug-ins for Red Hat Developer Hub to learn about Ansible, explore curated collections, and create playbook projects.
From b0079e355811661d098b7179ecaeb55d4c9fd509 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Mon, 28 Jul 2025 09:32:19 +0100
Subject: [PATCH 07/71] updating Containerized installation (#3914) (#3915)
---
...bling-automation-hub-collection-and-container-signing.adoc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
index 3f6a1e3a27..0840347165 100644
--- a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
+++ b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
@@ -135,13 +135,13 @@ j920hRy/3wJGRDBMFa4mlQg=
----
# Collection signing
hub_collection_signing=true
-hub_collection_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-2.5-2/collection-signing-key.priv
+hub_collection_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-<version>/collection-signing-key.priv
# This variable is required if the key is protected by a passphrase
hub_collection_signing_pass=
# Container signing
hub_container_signing=true
-hub_container_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-2.5-2/container-signing-key.priv
+hub_container_signing_key=/home/aapuser/aap/ansible-automation-platform-containerized-setup-<version>/container-signing-key.priv
# This variable is required if the key is protected by a passphrase
hub_container_signing_pass=
----
From ff44f0c137145eed7e26226bbcd32f1d9bc3ae49 Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Mon, 28 Jul 2025 13:28:26 +0100
Subject: [PATCH 08/71] DITA migration changes: Security automation (#3917)
(#3920)
DITA pre-migration tasks. Security automation Guide
https://issues.redhat.com/browse/AAP-49630
---
.../assembly-firewall-policy-manage.adoc | 27 ++++-------------
.../assemblies/security/assembly-idps.adoc | 29 ++++---------------
.../con-about-firewall-policy-management.adoc | 6 ++--
.../security/con-automate-idps-rules.adoc | 8 +++--
.../con-automating-firewall-rules.adoc | 21 ++++++--------
.../modules/security/con-requirements.adoc | 4 ++-
.../security/proc-creating-firewall-rule.adoc | 11 ++-----
.../security/proc-creating-idps-rule.adoc | 2 ++
.../security/proc-deleting-firewall-rule.adoc | 15 +++-------
.../security/proc-verifying-idps-install.adoc | 24 +++++++--------
10 files changed, 49 insertions(+), 98 deletions(-)
diff --git a/downstream/assemblies/security/assembly-firewall-policy-manage.adoc b/downstream/assemblies/security/assembly-firewall-policy-manage.adoc
index 71b82f5945..0f10cc403b 100644
--- a/downstream/assemblies/security/assembly-firewall-policy-manage.adoc
+++ b/downstream/assemblies/security/assembly-firewall-policy-manage.adoc
@@ -1,20 +1,8 @@
-////
-Retains the context of the parent assembly if this assembly is nested within another assembly.
-For more information about nesting assemblies, see: https://redhat-documentation.github.io/modular-docs/#nesting-assemblies
-See also the complementary step on the last line of this file.
-////
+:_mod-docs-content-type: ASSEMBLY
ifdef::context[:parent-context: {context}]
:imagesdir: images
-////
- Base the file name and the ID on the assembly title. For example:
-* file name: assembly-my-user-story.adoc
-* ID: [id="assembly-my-user-story_{context}"]
-* Title: = My user story
-
-The ID is an anchor that links to the module. Avoid changing it after the module has been published to ensure existing links are not broken. Include {context} in the ID so the assembly can be reused.
-////
[id="assembly-firewall-policy-management_{context}"]
@@ -24,20 +12,15 @@ The ID is an anchor that links to the module. Avoid changing it after the module
[role="_abstract"]
-As a security operator, you can use Ansible security automation to manage multiple firewall policies. Create and delete firewall rules to block or unblock a source IP address from accessing a destination IP address.
+As a security operator, you can use Ansible security automation to manage multiple firewall policies or create and delete firewall rules to block or unblock a source IP address from accessing a destination IP address.
include::security/con-about-firewall-policy-management.adoc[leveloffset=+1]
+
include::security/con-automating-firewall-rules.adoc[leveloffset=+1]
-////
-[leveloffset=+1] ensures that when a module title is a level 1 heading (= Title), the heading will be interpreted as a level-2 heading (== Title) in the assembly. Use [leveloffset=+2] and [leveloffset=+3] to nest modules in an assembly.
-////
+include::security/proc-creating-firewall-rule.adoc[leveloffset=+2]
-include::security/proc-creating-firewall-rule.adoc[leveloffset=+1]
-include::security/proc-deleting-firewall-rule.adoc[leveloffset=+1]
+include::security/proc-deleting-firewall-rule.adoc[leveloffset=+2]
-////
-Restore the context to what it was before this assembly.
-////
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/assemblies/security/assembly-idps.adoc b/downstream/assemblies/security/assembly-idps.adoc
index c84bee7838..54d1b2c3cd 100644
--- a/downstream/assemblies/security/assembly-idps.adoc
+++ b/downstream/assemblies/security/assembly-idps.adoc
@@ -1,43 +1,26 @@
-////
-Retains the context of the parent assembly if this assembly is nested within another assembly.
-For more information about nesting assemblies, see: https://redhat-documentation.github.io/modular-docs/#nesting-assemblies
-See also the complementary step on the last line of this file.
-////
+:_mod-docs-content-type: ASSEMBLY
ifdef::context[:parent-context: {context}]
:imagesdir: images
-////
- Base the file name and the ID on the assembly title. For example:
-* file name: assembly-my-user-story.adoc
-* ID: [id="assembly-my-user-story_{context}"]
-* Title: = My user story
-
-The ID is an anchor that links to the module. Avoid changing it after the module has been published to ensure existing links are not broken. Include {context} in the ID so the assembly can be reused.
-////
-
[id="assembly-idps_{context}"]
-= Automating Network Intrusion Detection and Prevention Systems (IDPS) with Ansible
+= Automating Network Intrusion Detection and Prevention Systems (IDPS) with {PlatformNameShort}
:context: idps
[role="_abstract"]
-You can use Ansible to automate your Intrusion Detection and Prevention System (IDPS). For the purpose of this guide, we use Snort as the IDPS. Use Ansible automation hub to consume content collections, such as tasks, roles, and modules to create automated workflows.
+You can use {PlatformNameShort} to automate your _Intrusion Detection and Prevention System_ (IDPS). For the purpose of this guide, we use Snort as the IDPS. Use {HubName} to consume content collections, such as tasks, roles, and modules to create automated workflows.
include::security/con-requirements.adoc[leveloffset=+1]
-include::security/proc-verifying-idps-install.adoc[leveloffset=+2]
-////
-[leveloffset=+1] ensures that when a module title is a level 1 heading (= Title), the heading will be interpreted as a level-2 heading (== Title) in the assembly. Use [leveloffset=+2] and [leveloffset=+3] to nest modules in an assembly.
-////
+include::security/proc-verifying-idps-install.adoc[leveloffset=+2]
include::security/con-automate-idps-rules.adoc[leveloffset=+1]
+
include::security/proc-creating-idps-rule.adoc[leveloffset=+2]
-////
-Restore the context to what it was before this assembly.
-////
+
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/modules/security/con-about-firewall-policy-management.adoc b/downstream/modules/security/con-about-firewall-policy-management.adoc
index 57de0ee84c..0a1c73f252 100644
--- a/downstream/modules/security/con-about-firewall-policy-management.adoc
+++ b/downstream/modules/security/con-about-firewall-policy-management.adoc
@@ -1,10 +1,8 @@
+:_mod-docs-content-type: CONCEPT
+
[id="con-about-firewall-policy-management_{context}"]
= About firewall policy management
-////
-[role="_abstract"]
-Manage multiple firewall policies across various products and vendors with Ansible security automation.
-////
An organization’s network firewall is the first line of defense against an attack and a vital component for maintaining a secure environment. As a security operator, you construct and manage secure networks to ensure that your firewall only allows inbound and outbound network traffic defined by your organization’s firewall policies. A firewall policy consists of security rules that protect the network against harmful incoming and outgoing traffic.
diff --git a/downstream/modules/security/con-automate-idps-rules.adoc b/downstream/modules/security/con-automate-idps-rules.adoc
index 4a5d3a08d9..f8aa274b88 100644
--- a/downstream/modules/security/con-automate-idps-rules.adoc
+++ b/downstream/modules/security/con-automate-idps-rules.adoc
@@ -1,10 +1,12 @@
[id="con-automate-ids-rules_{context}"]
-= Automating your IDPS rules with Ansible
+= Automating your IDPS rules with {PlatformNameShort}
-To automate your IDPS, use the `ids_rule` role to create and change Snort rules. Snort uses rule-based language that analyzes your network traffic and compares it against the given rule set.
+To automate your IDPS, use the `ids_rule` role to create and change Snort rules.
+Snort uses rule-based language that analyzes your network traffic and compares it against the given rule set.
-The following lab environment demonstrates what an Ansible security automation integration would look like. A machine called “Attacker” simulates a potential attack pattern on the target machine on which the IDPS is running.
+The following lab environment demonstrates what an Ansible security automation integration would look like.
+A machine called “Attacker” simulates a potential attack pattern on the target machine on which the IDPS is running.
Keep in mind that a real world setup will feature other vendors and technologies.
diff --git a/downstream/modules/security/con-automating-firewall-rules.adoc b/downstream/modules/security/con-automating-firewall-rules.adoc
index f3a130f59d..32eeda0ac8 100644
--- a/downstream/modules/security/con-automating-firewall-rules.adoc
+++ b/downstream/modules/security/con-automating-firewall-rules.adoc
@@ -1,22 +1,19 @@
-////
-Base the file name and the ID on the module title. For example:
-* file name: con-my-concept-module-a.adoc
-* ID: [id="con-my-concept-module-a_{context}"]
-* Title: = My concept module A
-////
+:_mod-docs-content-type: CONCEPT
[id="con-automating-firewall-rules_{context}"]
= Automate firewall rules
-////
-In the title of concept modules, include nouns or noun phrases that are used in the body text. This helps readers and search engines find the information quickly. Do not start the title of concept modules with a verb. See also _Wording of headings_ in _The IBM Style Guide_.
-////
-Ansible security automation enables you to automate various firewall policies that require a series of actions across various products. You can use an Ansible role, such as the https://github.com/ansible-security/acl_manager[acl_manager] role to manage your Access Control Lists (ACLs) for many firewall devices such as blocking or unblocking an IP or URL. Roles let you automatically load related vars, files, tasks, handlers, and other Ansible artifacts based on a known file structure. After you group your content in roles, you can easily reuse them and share them with other users.
+Ansible security automation enables you to automate various firewall policies that require a series of actions across various products.
+You can use an Ansible role, such as the https://github.com/ansible-security/acl_manager[acl_manager] role to manage your _Access Control Lists_ (ACLs) for many firewall devices such as blocking or unblocking an IP or URL.
+Roles let you automatically load related vars, files, tasks, handlers, and other Ansible artifacts based on a known file structure.
+After you group your content in roles, you can easily reuse them and share them with other users.
-The below lab environment is a simplified example of a real-world enterprise security architecture, which can be more complex and include additional vendor-specific tools. This is a typical incident response scenario where you receive an intrusion alert and immediately execute a playbook with the acl_manger role that blocks the attacker’s IP address.
+The following lab environment is a simplified example of a real-world enterprise security architecture, which can be more complex and include additional vendor-specific tools.
+This is a typical incident response scenario where you receive an intrusion alert and immediately execute a playbook with the acl_manager role that blocks the attacker’s IP address.
-Your entire team can use Ansible security automation to address investigations, threat hunting, and incident response all on one platform. https://www.redhat.com/en/technologies/management/ansible[Red Hat Ansible Automation Platform] provides you with certified content collections that are easy to consume and reuse within your security team.
+Your entire team can use Ansible security automation to address investigations, threat hunting, and incident response all on one platform.
+https://www.redhat.com/en/technologies/management/ansible[Red Hat Ansible Automation Platform] provides you with certified content collections that are easy to consume and reuse within your security team.
image::security-lab-environment.png[Simplified security lab environment]
diff --git a/downstream/modules/security/con-requirements.adoc b/downstream/modules/security/con-requirements.adoc
index 628b13933b..50a89d68b5 100644
--- a/downstream/modules/security/con-requirements.adoc
+++ b/downstream/modules/security/con-requirements.adoc
@@ -1,8 +1,10 @@
+:_mod-docs-content-type: CONCEPT
+
[id="con-requirements_{context}"]
= Requirements and prerequisites
-Before you begin automating your IDPS with Ansible, ensure that you have the proper installations and configurations necessary to successfully manage your IDPS.
+Before you begin automating your IDPS with {PlatformNameShort}, ensure that you have the proper installations and configurations necessary to successfully manage your IDPS.
* You have installed Ansible-core 2.15 or later.
* SSH connection and keys are configured.
diff --git a/downstream/modules/security/proc-creating-firewall-rule.adoc b/downstream/modules/security/proc-creating-firewall-rule.adoc
index 038c93a712..4c5381e17f 100644
--- a/downstream/modules/security/proc-creating-firewall-rule.adoc
+++ b/downstream/modules/security/proc-creating-firewall-rule.adoc
@@ -1,15 +1,8 @@
-////
-Base the file name and the ID on the module title. For example:
-* file name: proc-doing-procedure-a.adoc
-* ID: [id="doing-procedure-a_{context}"]
-* Title: = Doing procedure A
-
-The ID is an anchor that links to the module. Avoid changing it after the module has been published to ensure existing links are not broken.
-////
+:_mod-docs-content-type: PROCEDURE
[id="proc-creating-firewall-rule_{context}"]
-== Creating a new firewall rule
+= Creating a new firewall rule
[role="_abstract"]
Use the acl_manager role to create a new firewall rule for blocking a source IP address from accessing a destination IP address.
diff --git a/downstream/modules/security/proc-creating-idps-rule.adoc b/downstream/modules/security/proc-creating-idps-rule.adoc
index 99c61037df..434e38fffb 100644
--- a/downstream/modules/security/proc-creating-idps-rule.adoc
+++ b/downstream/modules/security/proc-creating-idps-rule.adoc
@@ -1,3 +1,5 @@
+:_mod-docs-content-type: PROCEDURE
+
[id="proc-creating-ids-rule_{context}"]
= Creating a new IDPS rule
diff --git a/downstream/modules/security/proc-deleting-firewall-rule.adoc b/downstream/modules/security/proc-deleting-firewall-rule.adoc
index b52f55cee1..a52654700c 100644
--- a/downstream/modules/security/proc-deleting-firewall-rule.adoc
+++ b/downstream/modules/security/proc-deleting-firewall-rule.adoc
@@ -1,15 +1,8 @@
-////
-Base the file name and the ID on the module title. For example:
-* file name: proc-doing-procedure-a.adoc
-* ID: [id="doing-procedure-a_{context}"]
-* Title: = Doing procedure A
-
-The ID is an anchor that links to the module. Avoid changing it after the module has been published to ensure existing links are not broken.
-////
+:_mod-docs-content-type: PROCEDURE
[id="proc-deleting-rule_{context}"]
-== Deleting a firewall rule
+= Deleting a firewall rule
[role="_abstract"]
Use the acl_manager role to delete a security rule.
@@ -27,7 +20,7 @@ Use the acl_manager role to delete a security rule.
$ ansible-galaxy install ansible_security.acl_manager
----
-. Using CLI, create a new playbook with the acl_manger role and set the parameters (e.g., source object, destination object, access rule between the two objects):
+. Using CLI, create a new playbook with the acl_manager role and set the parameters, for example, source object, destination object, access rule between the two objects:
+
----
- name: delete block list entry
@@ -43,7 +36,7 @@ $ ansible-galaxy install ansible_security.acl_manager
ansible_network_os: checkpoint
----
-. Run the playbook $ ansible-navigator run --ee false :
+. Run the playbook `$ ansible-navigator run --ee false`:
+
image::security-delete-rule.png[Playbook with deleted firewall rule]
diff --git a/downstream/modules/security/proc-verifying-idps-install.adoc b/downstream/modules/security/proc-verifying-idps-install.adoc
index e590e9cd2c..54a7434880 100644
--- a/downstream/modules/security/proc-verifying-idps-install.adoc
+++ b/downstream/modules/security/proc-verifying-idps-install.adoc
@@ -1,18 +1,14 @@
-////
-Base the file name and the ID on the module title. For example:
-* file name: proc-doing-procedure-a.adoc
-* ID: [id="doing-procedure-a_{context}"]
-* Title: = Doing procedure A
-
-The ID is an anchor that links to the module. Avoid changing it after the module has been published to ensure existing links are not broken.
-////
+:_mod-docs-content-type: PROCEDURE
[id="proc-verifying-ids-install_{context}"]
= Verifying your IDPS installation
-To verify that Snort has been configured successfully, call it via `sudo` and ask for the version:
+Use the following procedure to verify that Snort has been configured successfully:
+.Procedure
+. Call snort using `sudo` and ask for the version:
++
----
$ sudo snort --version
@@ -26,8 +22,10 @@ To verify that Snort has been configured successfully, call it via `sudo` and as
Using ZLIB version: 1.2.7
----
-Verify that the service is actively running via `sudo systemctl`:
-
+. Verify that the service is actively running using the following command:
++
+`sudo systemctl`:
++
----
$ sudo systemctl status snort
● snort.service - Snort service
@@ -39,6 +37,6 @@ $ sudo systemctl status snort
[...]
----
-If the Snort service is not actively running, restart it with `systemctl restart snort` and recheck the status.
+. If the Snort service is not actively running, restart it with `systemctl restart snort` and recheck the status.
-Once you confirm the service is actively running, exit the Snort server by simultaneously pressing `CTRL` and `D`, or by typing `exit` on the command line. All further interaction will be done through Ansible from the Ansible control host.
+. When you confirm that the service is actively running, exit the Snort server by simultaneously pressing `CTRL` and `D`, or by typing `exit` on the command line. All further interaction will be done through {PlatformNameShort} from the Ansible control host.
From d7fa5a748faf3f58f12d28530dad816cd4cbe356 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Tue, 29 Jul 2025 09:48:37 +0100
Subject: [PATCH 09/71] Update backup/restore note (#3918) (#3925)
Configuring automation execution - Update backup and restore note to ensure clarity regarding the latest version prerequisite
https://issues.redhat.com/browse/AAP-46263
---
.../assembly-ag-controller-backup-and-restore.adoc | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc b/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc
index 6b332fb986..a11dc60e04 100644
--- a/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc
+++ b/downstream/assemblies/platform/assembly-ag-controller-backup-and-restore.adoc
@@ -10,12 +10,12 @@ For more information, see the link:{URLControllerAdminGuide}/index#controller-ba
[NOTE]
====
-Ensure that you restore to the same version from which it was backed up.
-However, you must use the most recent minor version of a release to backup or restore your {PlatformNameShort} installation version.
-For example, if the current {PlatformNameShort} version you are on is 2.0.x, use only the latest 2.0 installer.
+When backing up {PlatformNameShort}, use the installation program that matches your currently installed version of {PlatformNameShort}.
-Backup and restore only works on PostgreSQL versions supported by your current platform version.
-For more information, see link:{URLPlanningGuide}/platform-system-requirements[System requirements] in the _{TitlePlanningGuide}_.
+When restoring {PlatformNameShort}, use the latest installation program available at the time of the restore. For example, if you are restoring a backup taken from version `2.6-1`, use the latest `2.6-x` installation program available at the time of the restore.
+
+Backup and restore functionality only works with the PostgreSQL versions supported by your current {PlatformNameShort} version.
+For more information, see link:{URLPlanningGuide}/platform-system-requirements[System requirements] in _{TitlePlanningGuide}_.
====
The {PlatformNameShort} setup playbook is invoked as `setup.sh` from the path where you unpacked the platform installer tarball.
From e342baab92384990b52dde93e948e9403158f720 Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Tue, 29 Jul 2025 13:41:54 +0100
Subject: [PATCH 10/71] AAP-48233 edits to PG (#3928)
---
.../platform/proc-operator-external-db-controller.adoc | 4 +++-
.../modules/platform/proc-operator-external-db-gateway.adoc | 4 +++-
.../modules/platform/proc-operator-external-db-hub.adoc | 4 +++-
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/downstream/modules/platform/proc-operator-external-db-controller.adoc b/downstream/modules/platform/proc-operator-external-db-controller.adoc
index a386db3774..ddd852a5a4 100644
--- a/downstream/modules/platform/proc-operator-external-db-controller.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-controller.adoc
@@ -24,7 +24,9 @@ The external database must be a PostgreSQL database that is the version supporte
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers}.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+
+If you choose to use an externally managed database with version 16 or 17, you must also rely on external backup and restore processes.
====
.Procedure
diff --git a/downstream/modules/platform/proc-operator-external-db-gateway.adoc b/downstream/modules/platform/proc-operator-external-db-gateway.adoc
index 0d75ae9014..c299fbb532 100644
--- a/downstream/modules/platform/proc-operator-external-db-gateway.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-gateway.adoc
@@ -44,7 +44,9 @@ The external database must be a PostgreSQL database that is the version supporte
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers}.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+
+If you choose to use an externally managed database with version 16 or 17, you must also rely on external backup and restore processes.
====
.Procedure
diff --git a/downstream/modules/platform/proc-operator-external-db-hub.adoc b/downstream/modules/platform/proc-operator-external-db-hub.adoc
index 73f58776ed..27c3e0a137 100644
--- a/downstream/modules/platform/proc-operator-external-db-hub.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-hub.adoc
@@ -25,7 +25,9 @@ The external postgres instance credentials and connection information will need
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers}.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+
+If you choose to use an externally managed database with version 16 or 17, you must also rely on external backup and restore processes.
====
.Procedure
From 94fd5f826ad52adb8a9fa351b29b90451d626ab6 Mon Sep 17 00:00:00 2001
From: Hala
Date: Tue, 29 Jul 2025 09:10:58 -0500
Subject: [PATCH 11/71] adds PaC info to getting started guide intro (#3922)
(#3931)
---
downstream/modules/platform/con-gs-automation-execution.adoc | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/downstream/modules/platform/con-gs-automation-execution.adoc b/downstream/modules/platform/con-gs-automation-execution.adoc
index b820c15f6a..7c08cd01ce 100644
--- a/downstream/modules/platform/con-gs-automation-execution.adoc
+++ b/downstream/modules/platform/con-gs-automation-execution.adoc
@@ -14,3 +14,7 @@ In the automation execution environment, you can use {ControllerName} tasks to b
An inventory is a single file, usually in INI or YAML format, containing a list of hosts and groups that can be acted upon using Ansible commands and playbooks.
You can use an inventory file to specify your installation scenario and describe host deployments to Ansible.
You can also use an inventory file to organize managed nodes in centralized files that give Ansible with system information and network locations.
+
+== Policy enforcement
+
+Policy enforcement at automation runtime is a feature that uses encoded rules to define, manage, and enforce policies that govern how your users interact with your {PlatformNameShort} instance. Policy enforcement automates policy management, improving security, compliance, and efficiency. Policy enforcement points can be configured at the level of the inventory, job template, or organization. For more information, see link:{URLControllerAdminGuide}/controller-pac[Implementing policy enforcement] in the {TitleControllerAdminGuide} guide.
\ No newline at end of file
From 56586f09251667f52cb013dea0b1832af1bbffff Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Wed, 30 Jul 2025 09:08:39 +0100
Subject: [PATCH 12/71] AAP-50441-migration-edits (#3929) (#3934)
---
.../aap-migration/con-migration-process-overview.adoc | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/downstream/modules/aap-migration/con-migration-process-overview.adoc b/downstream/modules/aap-migration/con-migration-process-overview.adoc
index e1d8338542..5c5b1eb088 100644
--- a/downstream/modules/aap-migration/con-migration-process-overview.adoc
+++ b/downstream/modules/aap-migration/con-migration-process-overview.adoc
@@ -3,6 +3,11 @@
[id="migration-process-overview"]
= Migration process overview
+[IMPORTANT]
+====
+You can only migrate to a different installation type of the same {PlatformNameShort} version. For example, you can migrate from RPM version {PlatformVers} to containerized {PlatformVers}, but not from RPM version 2.4 to containerized {PlatformVers}.
+====
+
The migration between {PlatformNameShort} installation types follows this general workflow:
. Prepare and assess the source environment - Prepare and assess the existing source environment for migration.
From 10d412605710ee8edbe77d799eaa15e5bca69168 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Wed, 30 Jul 2025 11:02:41 +0100
Subject: [PATCH 13/71] 2.6 Update Tested deployment models for 2.6 (#3936)
(#3938)
Update Tested deployment models for AAP 2.6
https://issues.redhat.com/browse/AAP-47918
---
.../ref-installation-deployment-models.adoc | 19 ++++++++++-------
.../modules/topologies/ref-ocp-a-env-a.adoc | 21 ++++++++++++-------
.../rpm-env-a-tested-system-config.adoc | 15 +++++++------
downstream/titles/topologies/master.adoc | 6 +++---
4 files changed, 37 insertions(+), 24 deletions(-)
diff --git a/downstream/modules/topologies/ref-installation-deployment-models.adoc b/downstream/modules/topologies/ref-installation-deployment-models.adoc
index ffc550ddf2..dd32d55245 100644
--- a/downstream/modules/topologies/ref-installation-deployment-models.adoc
+++ b/downstream/modules/topologies/ref-installation-deployment-models.adoc
@@ -9,21 +9,24 @@ The following table outlines the different ways to install or deploy {PlatformNa
[options="header"]
|====
| Mode | Infrastructure | Description | Tested topologies
-| RPM | Virtual machines and bare metal | The RPM installer deploys {PlatformNameShort} on {RHEL} by using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle.
-a|
-* link:{URLTopologies}/rpm-topologies#rpm-a-env-a[RPM {GrowthTopology}]
-* link:{URLTopologies}/rpm-topologies#rpm-b-env-a[RPM {EnterpriseTopology}]
| Containers
| Virtual machines and bare metal
| The containerized installer deploys {PlatformNameShort} on {RHEL} by using Podman which runs the platform in containers on host machines. Customers manage the product and infrastructure lifecycle.
a|
-* link:{URLTopologies}/container-topologies#cont-a-env-a[Container {GrowthTopology}]
-* link:{URLTopologies}/container-topologies#cont-b-env-a[Container {EnterpriseTopology}]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/container-topologies#cont-a-env-a[Container {GrowthTopology}]
+
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/container-topologies#cont-b-env-a[Container {EnterpriseTopology}]
| Operator
| Red Hat OpenShift
| The Operator uses Red Hat OpenShift Operators to deploy {PlatformNameShort} within Red Hat OpenShift. Customers manage the product and infrastructure lifecycle.
a|
-* link:{URLTopologies}/ocp-topologies#ocp-a-env-a[Operator {GrowthTopology}]
-* link:{URLTopologies}/ocp-topologies#ocp-b-env-a[Operator {EnterpriseTopology}]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/ocp-topologies#ocp-a-env-a[Operator {GrowthTopology}]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/ocp-topologies#ocp-b-env-a[Operator {EnterpriseTopology}]
+
+| RPM | Virtual machines and bare metal | The RPM installer deploys {PlatformNameShort} on {RHEL} by using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle.
+a|
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/rpm-topologies#rpm-a-env-a[RPM {GrowthTopology}]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/tested_deployment_models/rpm-topologies#rpm-b-env-a[RPM {EnterpriseTopology}]
+
|====
diff --git a/downstream/modules/topologies/ref-ocp-a-env-a.adoc b/downstream/modules/topologies/ref-ocp-a-env-a.adoc
index 56139bb13c..0d32df81ef 100644
--- a/downstream/modules/topologies/ref-ocp-a-env-a.adoc
+++ b/downstream/modules/topologies/ref-ocp-a-env-a.adoc
@@ -44,18 +44,25 @@ Red{nbsp}Hat has tested the following configurations to install and run {Platfor
.Tested system configurations
[options="header"]
|====
-| Type | Description
-| Subscription | Valid {PlatformName} subscription
-| Operating system | {RHEL} 9.2 or later minor versions of {RHEL} 9
-| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power)
+| Type | Description | Notes
+| Subscription | Valid {PlatformName} subscription |
+| Operating system | {RHEL} 9.2 or later minor versions of {RHEL} 9 |
+| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power) |
| Red Hat OpenShift
a|
* Version: 4.14
* num_of_control_nodes: 1
* num_of_worker_nodes: 1
-| Ansible-core | Ansible-core version {CoreUseVers} or later
-| Browser | A currently supported version of Mozilla Firefox or Google Chrome.
-| Database | {PostgresVers}
+|
+| Ansible-core | Ansible-core version {CoreUseVers} or later |
+| Browser | A currently supported version of Mozilla Firefox or Google Chrome. |
+| Database
+a|
+* For {PlatformNameShort} managed databases: {PostgresVers}
+* For customer provided (external) databases: {PostgresVers}, 16, or 17.
+a|
+* External (customer supported) databases require ICU support.
+* External databases using PostgreSQL 16 or 17 must rely on external backup and restore processes. Backup and restore functionality is dependent on utilities provided with {PostgresVers}.
|====
== Example custom resource file
diff --git a/downstream/snippets/rpm-env-a-tested-system-config.adoc b/downstream/snippets/rpm-env-a-tested-system-config.adoc
index 4593c2ffdb..35a89aabeb 100644
--- a/downstream/snippets/rpm-env-a-tested-system-config.adoc
+++ b/downstream/snippets/rpm-env-a-tested-system-config.adoc
@@ -4,12 +4,15 @@
|====
| Type | Description |
| Subscription | Valid {PlatformName} subscription |
-| Operating system
-a|
-* {RHEL} 8.8 or later minor versions of {RHEL} 8.
-* {RHEL} 9.2 or later minor versions of {RHEL} 9. |
+| Operating system | {RHEL} 9.4 or later minor versions of {RHEL} 9. |
| CPU architecture | x86_64, AArch64, s390x (IBM Z), ppc64le (IBM Power) |
| `ansible-core` | `ansible-core` version {CoreUseVers} or later | {PlatformNameShort} uses the system-wide ansible-core package to install the platform, but uses ansible-core 2.16 for both its control plane and built-in execution environments.
| Browser | A currently supported version of Mozilla Firefox or Google Chrome |
-| Database | {PostgresVers} | External (customer supported) databases require ICU support.
-|====
\ No newline at end of file
+| Database
+a|
+* For {PlatformNameShort} managed databases: {PostgresVers}
+* For customer provided (external) databases: {PostgresVers}, 16, or 17.
+a|
+* External (customer supported) databases require ICU support.
+* External databases using PostgreSQL 16 or 17 must rely on external backup and restore processes. Backup and restore functionality is dependent on utilities provided with {PostgresVers}.
+|====
diff --git a/downstream/titles/topologies/master.adoc b/downstream/titles/topologies/master.adoc
index 9b3ab3721c..ff960894d4 100644
--- a/downstream/titles/topologies/master.adoc
+++ b/downstream/titles/topologies/master.adoc
@@ -11,15 +11,15 @@ include::{Boilerplate}[]
include::topologies/assembly-overview-tested-deployment-models.adoc[leveloffset=+1]
-//RPM topologies
-include::topologies/assembly-rpm-topologies.adoc[leveloffset=+1]
-
//Container topologies
include::topologies/assembly-container-topologies.adoc[leveloffset=+1]
//Operator topologies
include::topologies/assembly-ocp-topologies.adoc[leveloffset=+1]
+//RPM topologies
+include::topologies/assembly-rpm-topologies.adoc[leveloffset=+1]
+
//Automation mesh nodes
include::topologies/topologies/ref-mesh-nodes.adoc[leveloffset=+1]
From 3a6e07e3e390e947f68d40b0c36976dafb8a0bd4 Mon Sep 17 00:00:00 2001
From: EMcWhinn <122449381+EMcWhinn@users.noreply.github.com>
Date: Wed, 30 Jul 2025 11:05:55 +0100
Subject: [PATCH 14/71] 2.6 Adding callback URL field info to OIDC auth section
(#3883) (#3937)
* Adding callback URL field info to OIDC auth section
[DOCS] Generic OIDC Authenticator Does Not Provide a Callback URL
https://issues.redhat.com/browse/AAP-49860
Affects `titles/central-auth`
* Tech review edit
---
.../modules/platform/proc-controller-set-up-generic-oidc.adoc | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc b/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc
index 50616d77cf..ee31c711d6 100644
--- a/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc
+++ b/downstream/modules/platform/proc-controller-set-up-generic-oidc.adoc
@@ -28,6 +28,10 @@ include::snippets/snip-gw-authentication-auto-migrate.adoc[]
* *Access Token URL*
* *Access Token Method*
* *Authorization URL*
+* *Callback URL* - The OIDC *Callback URL* field registers the service as a service provider (SP) with each OIDC provider you have configured.
+Leave this field blank.
+After you save this authentication method, this field is automatically generated.
+Configure your IdP to allow redirects to this URL as part of the authentication flow.
* *ID Key*
* *ID Token Issuer*
* *JWKS URI*
From bee5fabc835fe687c36b40296b619d24accb7b12 Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Wed, 30 Jul 2025 13:24:58 +0100
Subject: [PATCH 15/71] DITA migration changes: CAG Ch 8 (#3940) (#3941)
Configuring automation execution UI and modular compliance chapter 8
https://issues.redhat.com/browse/AAP-46725
---
.../assembly-controller-log-files.adoc | 44 +---------------
.../platform/ref-controller-log-files.adoc | 51 +++++++++++++++++++
2 files changed, 52 insertions(+), 43 deletions(-)
create mode 100644 downstream/modules/platform/ref-controller-log-files.adoc
diff --git a/downstream/assemblies/platform/assembly-controller-log-files.adoc b/downstream/assemblies/platform/assembly-controller-log-files.adoc
index abd45e8609..a3254eb983 100644
--- a/downstream/assemblies/platform/assembly-controller-log-files.adoc
+++ b/downstream/assemblies/platform/assembly-controller-log-files.adoc
@@ -9,48 +9,6 @@
* `/var/log/tower/`
* `/var/log/supervisor/`
-In the `/var/log/tower/` directory, you can view logfiles captured by:
+include::platform/ref-controller-log-files.adoc[leveloffset=+1]
-* *tower.log:* Captures the log messages such as runtime errors that occur when the job is executed.
-* *callback_receiver.log:* Captures callback receiver logs that handles callback events when running ansible jobs.
-* *dispatcher.log:* Captures log messages for the {ControllerName} dispatcher worker service.
-* *job_lifecycle.log:* Captures details of the job run, whether it is blocked, and what condition is blocking it.
-* *management_playbooks.log:* Captures the logs of management playbook runs, and isolated job runs such as copying the metadata.
-* *rsyslog.err:* Captures rsyslog errors authenticating with external logging services when sending logs to them.
-* *task_system.log:* Captures the logs of tasks that {ControllerName} is running in the background, such as adding cluster instances and logs related to information gathering or processing for analytics.
-* *tower_rbac_migrations.log:* Captures the logs for rbac database migration or upgrade.
-* *tower_system_tracking_migrations.log:* Captures the logs of the controller system tracking migration or upgrade.
-* *wsbroadcast.log:* Captures the logs of websocket connections in the controller nodes.
-In the `/var/log/supervisor/` directory, you can view logfiles captured by:
-
-* *awx-callback-receiver.log:* Captures the log of callback receiver that handles callback events when running ansible jobs, managed by `supervisord`.
-* *awx-daphne.log:* Captures the logs of Websocket communication of WebUI.
-* *awx-dispatcher.log:* Captures the logs that occur when dispatching a task to an {ControllerName} instance, such as when running a job.
-* *awx-rsyslog.log:* Captures the logs for the `rsyslog` service.
-* *awx-uwsgi.log:* Captures the logs related to uWSGI, which is an application server.
-* *awx-wsbroadcast.log:* Captures the logs of the websocket service that is used by {ControllerName}.
-* *failure-event-handler.stderr.log:* Captures the standard errors for `/usr/bin/failure-event-handler` supervisord's subprocess.
-* *supervisord.log:* Captures the logs related to `supervisord` itself.
-* *wsrelay.log:* Captures the communication logs within the websocket relay server.
-* *ws_heartbeat.log:* Captures the periodic checks on the health of services running on the host.
-* *rsyslog_configurer.log:* Captures rsyslog configuration activity associated with authenticating with external logging services.
-
-The `/var/log/supervisor/` directory includes `stdout` files for all services as well.
-
-You can expect the following log paths to be generated by services used by {ControllerName} (and {PlatformNameShort}):
-
-* */var/log/nginx/*
-* */var/lib/pgsql/data/pg_log/*
-* */var/log/redis/*
-
-.Troubleshooting
-
-Error logs can be found in the following locations:
-
-* {ControllerNameStart} server errors are logged in `/var/log/tower`.
-* Supervisors logs can be found in `/var/log/supervisor/`.
-* Nginx web server errors are logged in the httpd error log.
-* Configure other {ControllerName} logging needs in `/etc/tower/conf.d/`.
-
-Explore client-side issues using the JavaScript console built into most browsers and report any errors to Ansible through the Red Hat Customer portal at: https://access.redhat.com/.
diff --git a/downstream/modules/platform/ref-controller-log-files.adoc b/downstream/modules/platform/ref-controller-log-files.adoc
new file mode 100644
index 0000000000..a6d4779e99
--- /dev/null
+++ b/downstream/modules/platform/ref-controller-log-files.adoc
@@ -0,0 +1,51 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-controller-log-files"]
+
+= Access {ControllerName} logfiles
+
+In the `/var/log/tower/` directory, you can view logfiles captured by:
+
+* *tower.log:* Captures the log messages such as runtime errors that occur when the job is executed.
+* *callback_receiver.log:* Captures callback receiver logs that handles callback events when running ansible jobs.
+* *dispatcher.log:* Captures log messages for the {ControllerName} dispatcher worker service.
+* *job_lifecycle.log:* Captures details of the job run, whether it is blocked, and what condition is blocking it.
+* *management_playbooks.log:* Captures the logs of management playbook runs, and isolated job runs such as copying the metadata.
+* *rsyslog.err:* Captures rsyslog errors authenticating with external logging services when sending logs to them.
+* *task_system.log:* Captures the logs of tasks that {ControllerName} is running in the background, such as adding cluster instances and logs related to information gathering or processing for analytics.
+* *tower_rbac_migrations.log:* Captures the logs for rbac database migration or upgrade.
+* *tower_system_tracking_migrations.log:* Captures the logs of the controller system tracking migration or upgrade.
+* *wsbroadcast.log:* Captures the logs of websocket connections in the controller nodes.
+
+In the `/var/log/supervisor/` directory, you can view logfiles captured by:
+
+* *awx-callback-receiver.log:* Captures the log of callback receiver that handles callback events when running ansible jobs, managed by `supervisord`.
+* *awx-daphne.log:* Captures the logs of Websocket communication of WebUI.
+* *awx-dispatcher.log:* Captures the logs that occur when dispatching a task to an {ControllerName} instance, such as when running a job.
+* *awx-rsyslog.log:* Captures the logs for the `rsyslog` service.
+* *awx-uwsgi.log:* Captures the logs related to uWSGI, which is an application server.
+* *awx-wsbroadcast.log:* Captures the logs of the websocket service that is used by {ControllerName}.
+* *failure-event-handler.stderr.log:* Captures the standard errors for `/usr/bin/failure-event-handler` supervisord's subprocess.
+* *supervisord.log:* Captures the logs related to `supervisord` itself.
+* *wsrelay.log:* Captures the communication logs within the websocket relay server.
+* *ws_heartbeat.log:* Captures the periodic checks on the health of services running on the host.
+* *rsyslog_configurer.log:* Captures rsyslog configuration activity associated with authenticating with external logging services.
+
+The `/var/log/supervisor/` directory includes `stdout` files for all services as well.
+
+You can expect the following log paths to be generated by services used by {ControllerName} (and {PlatformNameShort}):
+
+* */var/log/nginx/*
+* */var/lib/pgsql/data/pg_log/*
+* */var/log/redis/*
+
+.Troubleshooting
+
+Error logs can be found in the following locations:
+
+* {ControllerNameStart} server errors are logged in `/var/log/tower`.
+* Supervisors logs can be found in `/var/log/supervisor/`.
+* Nginx web server errors are logged in the httpd error log.
+* Configure other {ControllerName} logging needs in `/etc/tower/conf.d/`.
+
+Explore client-side issues using the JavaScript console built into most browsers and report any errors to Ansible through the Red Hat Customer portal at: https://access.redhat.com/.
\ No newline at end of file
From 9199a352faea21d3b8cea16b915c4ea932a75254 Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Thu, 31 Jul 2025 10:06:17 +0100
Subject: [PATCH 16/71] Adjusting wording for 2.6 version (#3945)
* Adjusting wording for 2.6 version
* PR suggestions
---
.../modules/platform/proc-operator-external-db-controller.adoc | 2 +-
.../modules/platform/proc-operator-external-db-gateway.adoc | 2 +-
downstream/modules/platform/proc-operator-external-db-hub.adoc | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/downstream/modules/platform/proc-operator-external-db-controller.adoc b/downstream/modules/platform/proc-operator-external-db-controller.adoc
index ddd852a5a4..ba3a322c6c 100644
--- a/downstream/modules/platform/proc-operator-external-db-controller.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-controller.adoc
@@ -24,7 +24,7 @@ The external database must be a PostgreSQL database that is the version supporte
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and additionally supports PostgreSQL 15, 16, and 17 for external databases.
If you choose to use an externally managed database with version 16 or 17 you must also rely on external backup and restore processes.
====
diff --git a/downstream/modules/platform/proc-operator-external-db-gateway.adoc b/downstream/modules/platform/proc-operator-external-db-gateway.adoc
index c299fbb532..5b8f240de4 100644
--- a/downstream/modules/platform/proc-operator-external-db-gateway.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-gateway.adoc
@@ -44,7 +44,7 @@ The external database must be a PostgreSQL database that is the version supporte
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and additionally supports PostgreSQL 15, 16, and 17 for external databases.
If you choose to use an externally managed database with version 16 or 17 you must also rely on external backup and restore processes.
====
diff --git a/downstream/modules/platform/proc-operator-external-db-hub.adoc b/downstream/modules/platform/proc-operator-external-db-hub.adoc
index 27c3e0a137..68e8ef6919 100644
--- a/downstream/modules/platform/proc-operator-external-db-hub.adoc
+++ b/downstream/modules/platform/proc-operator-external-db-hub.adoc
@@ -25,7 +25,7 @@ The external postgres instance credentials and connection information will need
[NOTE]
====
-{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and PostgreSQL 16 and 17 for external databases.
+{PlatformNameShort} {PlatformVers} supports {PostgresVers} for its managed databases and additionally supports PostgreSQL 15, 16, and 17 for external databases.
If you choose to use an externally managed database with version 16 or 17 you must also rely on external backup and restore processes.
====
From 40abadf1d7aec8a4271b88221117823e06cdeee3 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Thu, 31 Jul 2025 10:19:17 +0100
Subject: [PATCH 17/71] Add backup/restore note to Containerized installation
(#3946) (#3948)
Containerized installation - Update backup & restore procedures to ensure clarity regarding the latest version prerequisite
https://issues.redhat.com/browse/AAP-46266
---
.../modules/platform/proc-backup-aap-container.adoc | 9 +++++++++
.../modules/platform/proc-restore-aap-container.adoc | 11 ++++++++++-
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/downstream/modules/platform/proc-backup-aap-container.adoc b/downstream/modules/platform/proc-backup-aap-container.adoc
index 3da00566f9..fde7d061bb 100644
--- a/downstream/modules/platform/proc-backup-aap-container.adoc
+++ b/downstream/modules/platform/proc-backup-aap-container.adoc
@@ -4,8 +4,17 @@
= Backing up containerized {PlatformNameShort}
+[role="_abstract"]
Perform a backup of your {ContainerBase} of {PlatformNameShort}.
+[NOTE]
+====
+When backing up {PlatformNameShort}, use the installation program that matches your currently installed version of {PlatformNameShort}.
+
+Backup functionality only works with the PostgreSQL versions supported by your current {PlatformNameShort} version.
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/aap-containerized-installation#system-requirements[System requirements].
+====
+
.Prerequisites
* You have logged in to the {RHEL} host as your dedicated non-root user.
diff --git a/downstream/modules/platform/proc-restore-aap-container.adoc b/downstream/modules/platform/proc-restore-aap-container.adoc
index cba4808307..0be6e84ea0 100644
--- a/downstream/modules/platform/proc-restore-aap-container.adoc
+++ b/downstream/modules/platform/proc-restore-aap-container.adoc
@@ -3,11 +3,20 @@
[id="proc-restore-aap-container"]
= Restoring containerized {PlatformNameShort}
+[role="_abstract"]
Restore your {ContainerBase} of {PlatformNameShort} from a backup, or to a different environment.
+[NOTE]
+====
+When restoring {PlatformNameShort}, use the latest installation program available at the time of the restore. For example, if you are restoring a backup taken from version `2.6-1`, use the latest `2.6-x` installation program available at the time of the restore.
+
+Restore functionality only works with the PostgreSQL versions supported by your current {PlatformNameShort} version.
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/aap-containerized-installation#system-requirements[System requirements].
+====
+
.Prerequisites
* You have logged in to the {RHEL} host as your dedicated non-root user.
-* You have a backup of your {PlatformNameShort} deployment. For more information, see link:{URLContainerizedInstall}/aap-containerized-installation#backing-up-containerized-ansible-automation-platform[Backing up container-based {PlatformNameShort}].
+* You have a backup of your {PlatformNameShort} deployment. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/containerized_installation/aap-containerized-installation#backing-up-containerized-ansible-automation-platform[Backing up containerized {PlatformNameShort}].
* If restoring to a different environment with the same hostnames, you have performed a fresh installation on the target environment with the same topology as the original (source) environment.
* You have ensured that the administrator credentials on the target environment match the administrator credentials from the source environment.
From 50da8b93118002fab4a12edeb9e77210ecda75dc Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Thu, 31 Jul 2025 10:31:23 +0100
Subject: [PATCH 18/71] Update controller version (#3950) (#3951)
Incorrect Controller version in documentation
https://issues.redhat.com/browse/AAP-44204
---
.../assemblies/platform/assembly-aap-upgrading-platform.adoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc b/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc
index 85c5056c68..3aec1d6227 100644
--- a/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc
+++ b/downstream/assemblies/platform/assembly-aap-upgrading-platform.adoc
@@ -20,7 +20,7 @@ You can then download the desired version of the {PlatformNameShort} installer,
* Prior to upgrading your {PlatformName}, ensure you have reviewed {LinkPlanningGuide} for a successful upgrade. You can then download the desired version of the {PlatformNameShort} installer, configure the inventory file in the installation bundle to reflect your environment, and then run the installer.
-* Prior to upgrading your {PlatformName}, ensure you have upgraded to {ControllerName} 2.5 or later.
+* Before upgrading your {PlatformName}, ensure you have upgraded to {ControllerName} 4.5 or later.
* When upgrading to {PlatformNameShort} {PlatformVers}, you must use RPM installer version 2.5-11 or later. If you use an older installer, the installation might fail. If you encounter a failed installation using an older version of the installer, rerun the installation with RPM installer version 2.5-11 or later.
From 7f152fe239861f97a313f0781a8ed87612cbbff4 Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Thu, 31 Jul 2025 11:21:38 +0100
Subject: [PATCH 19/71] 2.6 DITA migration changes: CAG Ch 9 (#3953) (#3955)
Configuring automation execution UI and modular compliance chapter 9
https://issues.redhat.com/browse/AAP-46726
---
...assembly-controller-logging-aggregation.adoc | 17 ++++++++++++++++-
.../proc-controller-set-up-logging.adoc | 2 +-
.../ref-controller-activity-stream-schema.adoc | 10 ++++++++--
.../ref-controller-job-status-changes.adoc | 2 +-
.../ref-controller-log-aggregators.adoc | 8 ++++----
.../modules/platform/ref-controller-logs.adoc | 5 +++--
...ef-controller-scan-fact-tracking-schema.adoc | 2 +-
.../ref-controller-troubleshoot-logging.adoc | 16 ++++++----------
8 files changed, 40 insertions(+), 22 deletions(-)
diff --git a/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc b/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
index 40b0e37425..0cb305cdce 100644
--- a/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
+++ b/downstream/assemblies/platform/assembly-controller-logging-aggregation.adoc
@@ -46,17 +46,32 @@ For example, if `Splunk` goes offline, `rsyslogd` stores a queue on the disk unt
By default, it stores up to 1GB of events (while Splunk is offline) but you can increase that to more than 1GB if necessary, or change the path where you save the queue.
include::platform/ref-controller-loggers.adoc[leveloffset=+1]
+
include::platform/ref-controller-log-message-schema.adoc[leveloffset=+2]
+
include::platform/ref-controller-activity-stream-schema.adoc[leveloffset=+2]
-include::platform/ref-controller-job-event-schema.adoc[leveloffset=+2]
+
+// Included this in activity-stream
+//include::platform/ref-controller-job-event-schema.adoc[leveloffset=+2]
+
include::platform/ref-controller-scan-fact-tracking-schema.adoc[leveloffset=+2]
+
include::platform/ref-controller-job-status-changes.adoc[leveloffset=+2]
+
include::platform/ref-controller-logs.adoc[leveloffset=+2]
+
include::platform/ref-controller-log-aggregators.adoc[leveloffset=+2]
+
include::platform/ref-controller-logging-splunk.adoc[leveloffset=+3]
+
include::platform/ref-controller-logging-loggly.adoc[leveloffset=+3]
+
include::platform/ref-controller-logging-sumologic.adoc[leveloffset=+3]
+
include::platform/ref-controller-logging-elastic-stack.adoc[leveloffset=+3]
+
include::platform/proc-controller-set-up-logging.adoc[leveloffset=+1]
+
include::platform/proc-controller-api-4xx-error-config.adoc[leveloffset=2]
+
include::platform/ref-controller-troubleshoot-logging.adoc[leveloffset=+1]
diff --git a/downstream/modules/platform/proc-controller-set-up-logging.adoc b/downstream/modules/platform/proc-controller-set-up-logging.adoc
index 8555ae8991..efd859654d 100644
--- a/downstream/modules/platform/proc-controller-set-up-logging.adoc
+++ b/downstream/modules/platform/proc-controller-set-up-logging.adoc
@@ -57,7 +57,7 @@ Equivalent to the `rsyslogd queue.maxdiskspace` setting on the action (e.g. `omh
It stores files in the directory specified by `LOG_AGGREGATOR_MAX_DISK_USAGE_PATH`.
* *File system location for rsyslogd disk persistence*: Location to persist logs that should be retried after an outage of the external log aggregator (defaults to `/var/lib/awx`).
Equivalent to the `rsyslogd queue.spoolDirectory` setting.
-* *Log Format For API 4XX Errors*: Configure a specific error message. For more information, see link:{URLControllerAdminGuide}/assembly-controller-logging-aggregation#proc-controller-api-4xx-error-config[API 4XX Error Configuration].
+* *Log Format For API 4XX Errors*: Configure a specific error message. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#proc-controller-api-4xx-error-config[API 4XX Error Configuration].
Set the following options:
diff --git a/downstream/modules/platform/ref-controller-activity-stream-schema.adoc b/downstream/modules/platform/ref-controller-activity-stream-schema.adoc
index f27fbccc62..cee888b927 100644
--- a/downstream/modules/platform/ref-controller-activity-stream-schema.adoc
+++ b/downstream/modules/platform/ref-controller-activity-stream-schema.adoc
@@ -4,7 +4,7 @@
= Activity stream schema
-This uses the fields common to all loggers listed in xref:ref-controller-log-message-schema[Log message schema].
+This uses the fields common to all loggers listed in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-log-message-schema[Log message schema].
It has the following additional fields:
@@ -12,4 +12,10 @@ It has the following additional fields:
* `changes`: JSON summary of what fields changed, and their old or new values.
* `operation`: The basic category of the changes logged in the activity stream, for instance, "associate".
* `object1`: Information about the primary object being operated on, consistent with what is shown in the activity stream.
-* `object2`: If applicable, the second object involved in the action.
\ No newline at end of file
+* `object2`: If applicable, the second object involved in the action.
+
+This logger reflects the data being saved into job events, except when they would otherwise conflict with expected standard fields from the logger, in which case the fields are nested.
+Notably, the field host on the `job_event` model is given as `event_host`.
+There is also a sub-dictionary field, `event_data` within the payload, which contains different fields depending on the specifics of the Ansible event.
+
+This logger also includes the common fields in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-log-message-schema[Log message schema].
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-job-status-changes.adoc b/downstream/modules/platform/ref-controller-job-status-changes.adoc
index 03d76f4650..edb153c795 100644
--- a/downstream/modules/platform/ref-controller-job-status-changes.adoc
+++ b/downstream/modules/platform/ref-controller-job-status-changes.adoc
@@ -6,4 +6,4 @@
This is a lower-volume source of information about changes in job states compared to job events, and captures changes to types of unified jobs other than job template based jobs.
-This logger also includes the common fields in xref:ref-controller-log-message-schema[Log message schema] and fields present on the job model.
\ No newline at end of file
+This logger also includes the common fields in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-log-message-schema[Log message schema] and fields present on the job model.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-log-aggregators.adoc b/downstream/modules/platform/ref-controller-log-aggregators.adoc
index f40c63560d..447c733ac6 100644
--- a/downstream/modules/platform/ref-controller-log-aggregators.adoc
+++ b/downstream/modules/platform/ref-controller-log-aggregators.adoc
@@ -6,7 +6,7 @@
The logging aggregator service works with the following monitoring and data analysis systems:
-* xref:ref-controller-logging-splunk[Splunk]
-* xref:ref-controller-logging-loggly[Loggly]
-* xref:ref-controller-logging-sumologic[Sumologic]
-* xref:ref-controller-logging-elastic-stack[Elastic Stack (formerly ELK stack)]
\ No newline at end of file
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-logging-splunk[Splunk]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-logging-loggly[Loggly]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-logging-sumologic[Sumologic]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-logging-elastic-stack[Elastic Stack (formerly ELK stack)]
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-logs.adoc b/downstream/modules/platform/ref-controller-logs.adoc
index 36b34a2048..1cf63b7b3b 100644
--- a/downstream/modules/platform/ref-controller-logs.adoc
+++ b/downstream/modules/platform/ref-controller-logs.adoc
@@ -4,8 +4,9 @@
= {ControllerNameStart} logs
-This logger also includes the common fields in xref:ref-controller-log-message-schema[Log message schema].
+This logger also includes the common fields in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-log-message-schema[Log message schema].
In addition, this contains a `msg` field with the log message.
Errors contain a separate `traceback` field.
-From the navigation panel, select {MenuSetLogging}. On the *Logging Settings* page click btn:[Edit] and use the *ENABLE EXTERNAL LOGGING* option to enable or disable the logging components.
+From the navigation panel, select {MenuSetLogging}.
+On the *Logging Settings* page click btn:[Edit] and use the *ENABLE EXTERNAL LOGGING* option to enable or disable the logging components.
diff --git a/downstream/modules/platform/ref-controller-scan-fact-tracking-schema.adoc b/downstream/modules/platform/ref-controller-scan-fact-tracking-schema.adoc
index c12dc3baf5..cad23f8648 100644
--- a/downstream/modules/platform/ref-controller-scan-fact-tracking-schema.adoc
+++ b/downstream/modules/platform/ref-controller-scan-fact-tracking-schema.adoc
@@ -17,4 +17,4 @@ Periods are not allowed by elastic search in names, and are replaced with "_" by
* `host`: Name of the host the scan applies to.
* `inventory_id`: The inventory id the host is inside of.
-This logger also includes the common fields in xref:ref-controller-log-message-schema[Log message schema].
\ No newline at end of file
+This logger also includes the common fields in link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#ref-controller-log-message-schema[Log message schema].
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-troubleshoot-logging.adoc b/downstream/modules/platform/ref-controller-troubleshoot-logging.adoc
index 2f906669ac..c163c44c29 100644
--- a/downstream/modules/platform/ref-controller-troubleshoot-logging.adoc
+++ b/downstream/modules/platform/ref-controller-troubleshoot-logging.adoc
@@ -4,27 +4,23 @@
= Troubleshooting logging
-[discrete]
-=== Logging Aggregation
+*Logging Aggregation*
If you have sent a message with the test button to your configured logging service through http or https, but did not receive the message, check the `/var/log/tower/rsyslog.err` log file.
This is where errors are stored if they occurred when authenticating rsyslog with an http or https external logging service.
Note that if there are no errors, this file does not exist.
-[discrete]
-=== API 4XX Errors
+*API 4XX Errors*
You can include the API error message for 4XX errors by modifying the log format for those messages.
-Refer to the xref:proc-controller-api-4xx-error-config[API 4XX Error Configuration].
+Refer to the link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#proc-controller-api-4xx-error-config[API 4XX Error Configuration].
-[discrete]
-=== LDAP
+*LDAP*
You can enable logging messages for the LDAP adapter.
-For more information, see xref:proc-controller-api-4xx-error-config[API 4XX Error Configuration].
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/{PlatformVers}/html/configuring_automation_execution/assembly-controller-logging-aggregation#proc-controller-api-4xx-error-config[API 4XX Error Configuration].
-[discrete]
-=== SAML
+*SAML*
You can enable logging messages for the SAML adapter the same way you can enable logging for LDAP.
//Refer to the xref:controller-enable-logging-LDAP[Enabling logging for LDAP] section for more detail.
\ No newline at end of file
From 1bb1d361c52bab56ee4c082039968d1be9afa7a1 Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Thu, 31 Jul 2025 11:53:48 +0100
Subject: [PATCH 20/71] Add reference to AWS (#3957) (#3959)
Include specific reference to AWS Managed coverage within automation mesh docs for operator-based envs.
https://issues.redhat.com/browse/AAP-50358
---
downstream/assemblies/platform/assembly-planning-mesh.adoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/downstream/assemblies/platform/assembly-planning-mesh.adoc b/downstream/assemblies/platform/assembly-planning-mesh.adoc
index 9ebccf69cc..7bcd76758e 100644
--- a/downstream/assemblies/platform/assembly-planning-mesh.adoc
+++ b/downstream/assemblies/platform/assembly-planning-mesh.adoc
@@ -19,7 +19,7 @@ Simple to complex topology examples are included to illustrate the various ways
endif::mesh-VM[]
ifdef::operator-mesh[]
The following topics contain information to help plan an {AutomationMesh} deployment in your operator-based {PlatformName} environment.
-The document covers the setting up of {AutomationMesh} on operator-based deployments, such as {OCPShort} and {PlatformNameShort} on {Azure} managed application.
+The document covers the setting up of {AutomationMesh} on operator-based deployments, such as {OCPShort} and {PlatformNameShort} on {AWS} (AWS) and {Azure} managed applications.
endif::operator-mesh[]
include::platform/con-about-automation-mesh.adoc[leveloffset=+1]
From 584bba9d865d1c03e908a2caa2c2c6cd52a5c5fa Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Thu, 31 Jul 2025 13:09:24 +0100
Subject: [PATCH 21/71] Corrected links (#3935) (#3961)
Broken Link Report July 21 2025
https://issues.redhat.com/browse/AAP-50562
LGTM
---
.../platform/ref-controller-credential-hashiCorp-vault.adoc | 2 +-
.../modules/platform/ref-controller-credential-network.adoc | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc b/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc
index 615276f5db..9fd8ee9466 100644
--- a/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc
+++ b/downstream/modules/platform/ref-controller-credential-hashiCorp-vault.adoc
@@ -6,4 +6,4 @@
This is considered part of the secret management capability.
-For more information, see link:{URLControllerAdminGuide}/controller-credentials#ref-controller-credential-hasiCorp-secret[HashiCorp Vault Signed SSH].
\ No newline at end of file
+For more information, see link:{URLControllerAdminGuide}/assembly-controller-secret-management#ref-hashicorp-signed-ssh[HashiCorp Vault Signed SSH].
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-controller-credential-network.adoc b/downstream/modules/platform/ref-controller-credential-network.adoc
index 5d183bb299..7c8755369d 100644
--- a/downstream/modules/platform/ref-controller-credential-network.adoc
+++ b/downstream/modules/platform/ref-controller-credential-network.adoc
@@ -14,7 +14,7 @@ When connecting to network devices, the credential type must match the connectio
* For `local` connections using `provider`, credential type should be *Network*.
* For all other network connections (`httpapi`, `netconf`, and `network_cli`), the credential type should be *Machine*.
-For more information about connection types available for network devices, see link:{URLControllerUserGuide}/using_automation_execution/controller-credentials#ref-controller-multiple-connection-protocols[Multiple Communication Protocols].
+For more information about connection types available for network devices, see link:{URLControllerUserGuide}/controller-credentials#ref-controller-multiple-connection-protocols[Multiple Communication Protocols].
{ControllerNameStart} uses the following environment variables for Network credentials:
From bfe9ad143ba9dac8d208de90aa4319fd866a6afd Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Thu, 31 Jul 2025 13:44:00 +0100
Subject: [PATCH 22/71] Update docs for mod docs (#3956) (#3962)
Affected titles:
- `titles/aap-containerized-install`
- `titles/topologies`
- `titles/aap-migration`
- `titles/troubleshooting-aap`
Ansible DITA readiness (2025-07-09) - Michelle
https://issues.redhat.com/browse/AAP-49393
---
.../aap-migration/assembly-migration-artifact.adoc | 1 +
.../assembly-migration-prerequisites.adoc | 1 +
.../aap-migration/assembly-source-containerized.adoc | 1 +
.../assemblies/aap-migration/assembly-source-rpm.adoc | 1 +
.../aap-migration/assembly-target-containerized.adoc | 1 +
.../aap-migration/assembly-target-managed-aap.adoc | 1 +
.../assemblies/aap-migration/assembly-target-ocp.adoc | 1 +
.../assembly-aap-containerized-installation.adoc | 1 +
.../assembly-appendix-inventory-file-vars.adoc | 2 +-
...embly-appendix-troubleshoot-containerized-aap.adoc | 1 +
.../platform/assembly-configure-hub-storage.adoc | 1 +
.../assembly-setup-postgresql-ext-database.adoc | 11 ++++++-----
.../assembly-using-custom-tls-certificates.adoc | 1 +
.../assembly-appendix-topology-resources.adoc | 3 +++
.../topologies/assembly-container-topologies.adoc | 2 ++
.../topologies/assembly-ocp-topologies.adoc | 2 ++
.../assembly-overview-tested-deployment-models.adoc | 3 +++
.../topologies/assembly-rpm-topologies.adoc | 1 +
.../assembly-diagnosing-the-problem.adoc | 1 +
.../assembly-troubleshoot-backup-recovery.adoc | 5 ++++-
.../assembly-troubleshoot-controller.adoc | 5 ++++-
.../assembly-troubleshoot-execution-environments.adoc | 3 ++-
.../assembly-troubleshoot-installation.adoc | 3 ++-
.../assembly-troubleshoot-jobs.adoc | 1 +
.../assembly-troubleshoot-networking.adoc | 3 ++-
.../assembly-troubleshoot-playbooks.adoc | 4 +++-
.../assembly-troubleshoot-upgrade.adoc | 5 +++--
.../modules/aap-migration/con-artifact-structure.adoc | 1 +
.../con-containerized-to-managed-prerequisites.adoc | 1 +
.../con-containerized-to-ocp-prerequisites.adoc | 1 +
.../con-introduction-and-objectives.adoc | 1 +
.../modules/aap-migration/con-manifest-file.adoc | 1 +
.../aap-migration/con-migration-process-overview.adoc | 1 +
.../modules/aap-migration/con-out-of-scope.adoc | 1 +
.../con-rpm-to-containerized-prerequisites.adoc | 1 +
.../con-rpm-to-managed-prerequisites.adoc | 1 +
.../aap-migration/con-rpm-to-ocp-prerequisites.adoc | 1 +
.../modules/aap-migration/con-secrets-file.adoc | 3 ++-
.../aap-migration/proc-containerized-post-import.adoc | 1 +
.../proc-containerized-target-import.adoc | 1 +
.../aap-migration/proc-containerized-target-prep.adoc | 1 +
.../aap-migration/proc-containerized-validation.adoc | 1 +
.../aap-migration/proc-managed-post-import.adoc | 1 +
.../aap-migration/proc-managed-target-migration.adoc | 3 +++
.../modules/aap-migration/proc-ocp-post-import.adoc | 1 +
.../modules/aap-migration/proc-ocp-target-import.adoc | 1 +
.../modules/aap-migration/proc-ocp-target-prep.adoc | 1 +
.../modules/aap-migration/proc-ocp-validation.adoc | 1 +
.../proc-rpm-environment-source-prep.adoc | 1 +
.../proc-rpm-source-environment-export.adoc | 1 +
.../ref-migration-artifact-checklist.adoc | 1 +
.../con-certs-per-service-considerations.adoc | 1 +
.../platform/con-installer-generated-certs.adoc | 1 +
.../platform/con-receptor-cert-considerations.adoc | 1 +
.../platform/proc-add-eda-safe-plugin-var.adoc | 2 +-
.../modules/platform/proc-configure-ext-db-mtls.adoc | 1 +
.../proc-configure-haproxy-load-balancer.adoc | 1 +
.../platform/proc-configure-hub-azure-storage.adoc | 1 +
.../platform/proc-configure-hub-nfs-storage.adoc | 1 +
.../platform/proc-configure-hub-s3-storage.adoc | 1 +
...roc-containerized-troubleshoot-gathering-logs.adoc | 5 +----
.../platform/proc-downloading-containerized-aap.adoc | 1 +
.../platform/proc-enable-hstore-extension.adoc | 1 +
...tomation-hub-collection-and-container-signing.adoc | 1 +
.../platform/proc-installing-containerized-aap.adoc | 1 +
...-managed-nodes-for-containerized-installation.adoc | 1 +
...-the-rhel-host-for-containerized-installation.adoc | 1 +
.../modules/platform/proc-provide-custom-ca-cert.adoc | 1 +
.../proc-provide-custom-tls-certs-per-service.adoc | 1 +
.../platform/proc-reinstalling-containerized-aap.adoc | 1 -
.../platform/proc-set-registry-username-password.adoc | 1 +
.../platform/proc-setup-ext-db-with-admin-creds.adoc | 1 +
.../proc-setup-ext-db-without-admin-creds.adoc | 1 +
.../platform/proc-uninstalling-containerized-aap.adoc | 1 +
.../modules/platform/proc-update-aap-container.adoc | 1 +
.../modules/platform/proc-use-custom-ca-certs.adoc | 1 +
.../modules/platform/ref-adding-execution-nodes.adoc | 4 +---
.../platform/ref-ansible-inventory-variables.adoc | 1 +
.../platform/ref-configuring-inventory-file.adoc | 1 +
.../platform/ref-cont-aap-system-requirements.adoc | 1 +
.../ref-containerized-troubleshoot-config.adoc | 3 +++
.../ref-containerized-troubleshoot-diagnosing.adoc | 1 +
.../ref-containerized-troubleshoot-install.adoc | 3 +++
.../platform/ref-containerized-troubleshoot-ref.adoc | 3 +++
.../modules/platform/ref-controller-variables.adoc | 3 +++
.../platform/ref-database-inventory-variables.adoc | 3 +++
.../platform/ref-eda-controller-variables.adoc | 3 +++
.../modules/platform/ref-gateway-variables.adoc | 4 +++-
.../platform/ref-general-inventory-variables.adoc | 3 +++
downstream/modules/platform/ref-hub-variables.adoc | 3 +++
.../platform/ref-images-inventory-variables.adoc | 3 +++
.../platform/ref-receptor-inventory-variables.adoc | 3 +++
.../platform/ref-redis-inventory-variables.adoc | 3 +++
downstream/modules/topologies/ref-cont-a-env-a.adoc | 1 +
downstream/modules/topologies/ref-cont-b-env-a.adoc | 1 +
.../ref-installation-deployment-models.adoc | 1 +
downstream/modules/topologies/ref-mesh-nodes.adoc | 2 ++
downstream/modules/topologies/ref-ocp-a-env-a.adoc | 2 ++
downstream/modules/topologies/ref-ocp-b-env-a.adoc | 2 ++
downstream/modules/topologies/ref-rpm-a-env-a.adoc | 1 +
downstream/modules/topologies/ref-rpm-b-env-a.adoc | 1 +
.../proc-troubleshoot-aap-packages.adoc | 4 +++-
.../proc-troubleshoot-job-pending.adoc | 2 ++
.../proc-troubleshoot-job-permissions.adoc | 1 +
.../proc-troubleshoot-job-resolve-module.adoc | 1 +
.../proc-troubleshoot-job-timeout.adoc | 1 +
.../proc-troubleshoot-must-gather.adoc | 1 +
.../proc-troubleshoot-sosreport.adoc | 2 ++
.../proc-troubleshoot-ssl-tls-issues.adoc | 1 +
.../proc-troubleshoot-subnet-conflict.adoc | 2 ++
.../proc-troubleshoot-use-in-controller.adoc | 7 +++----
111 files changed, 171 insertions(+), 30 deletions(-)
diff --git a/downstream/assemblies/aap-migration/assembly-migration-artifact.adoc b/downstream/assemblies/aap-migration/assembly-migration-artifact.adoc
index 35befe8600..6ab9730bf9 100644
--- a/downstream/assemblies/aap-migration/assembly-migration-artifact.adoc
+++ b/downstream/assemblies/aap-migration/assembly-migration-artifact.adoc
@@ -3,6 +3,7 @@
[id="migration-artifact"]
= Migration artifact structure and verification
+[role="_abstract"]
The migration artifact is a critical component for successfully transferring your {PlatformNameShort} deployment. It packages all necessary data and configurations from your source environment.
This section details the structure of the migration artifact and includes a migration checklist for artifact verification.
diff --git a/downstream/assemblies/aap-migration/assembly-migration-prerequisites.adoc b/downstream/assemblies/aap-migration/assembly-migration-prerequisites.adoc
index da1e32a941..8abcdd9b69 100644
--- a/downstream/assemblies/aap-migration/assembly-migration-prerequisites.adoc
+++ b/downstream/assemblies/aap-migration/assembly-migration-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="migration-prerequisites"]
= Migration prerequisites
+[role="_abstract"]
Prerequisites for migrating your {PlatformNameShort} deployment. For your specific migration path, ensure that you meet all necessary conditions before proceeding.
include::aap-migration/con-rpm-to-containerized-prerequisites.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/aap-migration/assembly-source-containerized.adoc b/downstream/assemblies/aap-migration/assembly-source-containerized.adoc
index ea79fe0f99..b380de83f7 100644
--- a/downstream/assemblies/aap-migration/assembly-source-containerized.adoc
+++ b/downstream/assemblies/aap-migration/assembly-source-containerized.adoc
@@ -3,6 +3,7 @@
[id="source-containerized"]
= Container-based {PlatformNameShort}
+[role="_abstract"]
Prepare and export data from your container-based {PlatformNameShort} deployment.
include::aap-migration/proc-containerized-source-environment-preparation-assessment.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/aap-migration/assembly-source-rpm.adoc b/downstream/assemblies/aap-migration/assembly-source-rpm.adoc
index 9b661f385f..d6b54a9617 100644
--- a/downstream/assemblies/aap-migration/assembly-source-rpm.adoc
+++ b/downstream/assemblies/aap-migration/assembly-source-rpm.adoc
@@ -3,6 +3,7 @@
[id="source-rpm"]
= RPM-based {PlatformNameShort}
+[role="_abstract"]
Prepare and export data from your RPM-based {PlatformNameShort} deployment.
include::aap-migration/proc-rpm-environment-source-prep.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/aap-migration/assembly-target-containerized.adoc b/downstream/assemblies/aap-migration/assembly-target-containerized.adoc
index bed0505c07..216b25bab3 100644
--- a/downstream/assemblies/aap-migration/assembly-target-containerized.adoc
+++ b/downstream/assemblies/aap-migration/assembly-target-containerized.adoc
@@ -3,6 +3,7 @@
[id="target-containerized"]
= Container-based {PlatformNameShort}
+[role="_abstract"]
Prepare and assess your target container-based {PlatformNameShort} environment, and import and reconcile your migrated content.
include::aap-migration/proc-containerized-target-prep.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/aap-migration/assembly-target-managed-aap.adoc b/downstream/assemblies/aap-migration/assembly-target-managed-aap.adoc
index 4c075684c6..0baf95c287 100644
--- a/downstream/assemblies/aap-migration/assembly-target-managed-aap.adoc
+++ b/downstream/assemblies/aap-migration/assembly-target-managed-aap.adoc
@@ -3,6 +3,7 @@
[id="target-managed-aap"]
= Managed {PlatformNameShort}
+[role="_abstract"]
Prepare and migrate your source environment to a Managed {PlatformNameShort} deployment, and reconcile the target environment post-migration.
include::aap-migration/proc-managed-target-migration.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/aap-migration/assembly-target-ocp.adoc b/downstream/assemblies/aap-migration/assembly-target-ocp.adoc
index 85d07a74e9..b18e32454c 100644
--- a/downstream/assemblies/aap-migration/assembly-target-ocp.adoc
+++ b/downstream/assemblies/aap-migration/assembly-target-ocp.adoc
@@ -3,6 +3,7 @@
[id="target-ocp"]
= {OCPShort}
+[role="_abstract"]
Prepare and assess your target {OCPShort} environment, and import and reconcile your migrated content.
include::aap-migration/proc-ocp-target-prep.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
index 1dacd46dc3..fa22ecada8 100644
--- a/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
+++ b/downstream/assemblies/platform/assembly-aap-containerized-installation.adoc
@@ -6,6 +6,7 @@ ifdef::context[:parent-context: {context}]
:context: aap-containerized-installation
+[role="_abstract"]
{PlatformNameShort} is a commercial offering that helps teams manage complex multi-tier deployments by adding control, knowledge, and delegation to Ansible-powered environments.
This guide helps you to understand the installation requirements and processes behind the containerized version of {PlatformNameShort}.
diff --git a/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc b/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc
index 04934674cb..7992e1b5e8 100644
--- a/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc
+++ b/downstream/assemblies/platform/assembly-appendix-inventory-file-vars.adoc
@@ -3,7 +3,7 @@
[id="appendix-inventory-files-vars"]
= Inventory file variables
-
+[role="_abstract"]
The following tables contain information about the variables used in {PlatformNameShort}'s installation `inventory` files. The tables include the variables that you can use for RPM-based installation and {ContainerBase}.
include::platform/ref-ansible-inventory-variables.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc b/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc
index 00013798f5..46750229ab 100644
--- a/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc
+++ b/downstream/assemblies/platform/assembly-appendix-troubleshoot-containerized-aap.adoc
@@ -7,6 +7,7 @@ ifdef::context[:parent-context: {context}]
:context: troubleshooting-containerized-aap
+[role="_abstract"]
Use this information to troubleshoot your containerized {PlatformNameShort} installation.
include::platform/proc-containerized-troubleshoot-gathering-logs.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-configure-hub-storage.adoc b/downstream/assemblies/platform/assembly-configure-hub-storage.adoc
index 1608429921..6a485f8c68 100644
--- a/downstream/assemblies/platform/assembly-configure-hub-storage.adoc
+++ b/downstream/assemblies/platform/assembly-configure-hub-storage.adoc
@@ -5,6 +5,7 @@ ifdef::context[:parent-context: {context}]
= Configuring storage for {HubName}
+[role="_abstract"]
Configure storage backends for {HubName} including Amazon S3, Azure Blob Storage, and Network File System (NFS) storage.
include::platform/proc-configure-hub-s3-storage.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-setup-postgresql-ext-database.adoc b/downstream/assemblies/platform/assembly-setup-postgresql-ext-database.adoc
index fe69b56cbc..0a9dc34854 100644
--- a/downstream/assemblies/platform/assembly-setup-postgresql-ext-database.adoc
+++ b/downstream/assemblies/platform/assembly-setup-postgresql-ext-database.adoc
@@ -5,6 +5,12 @@ ifdef::context[:parent-context: {context}]
= Setting up a customer provided (external) database
+[role="_abstract"]
+There are two possible scenarios for setting up an external database:
+
+. An external database with PostgreSQL admin credentials
+. An external database without PostgreSQL admin credentials
+
[IMPORTANT]
====
* When using an external database with {PlatformNameShort}, you must create and maintain that database. Ensure that you clear your external database when uninstalling {PlatformNameShort}.
@@ -14,11 +20,6 @@ ifdef::context[:parent-context: {context}]
* During configuration of an external database, you must check the external database coverage. For more information, see link:https://access.redhat.com/articles/4010491[{PlatformName} Database Scope of Coverage].
====
-There are two possible scenarios for setting up an external database:
-
-. An external database with PostgreSQL admin credentials
-. An external database without PostgreSQL admin credentials
-
include::platform/proc-setup-ext-db-with-admin-creds.adoc[leveloffset=+1]
include::platform/proc-setup-ext-db-without-admin-creds.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc b/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
index c5efd3cae5..93369f5119 100644
--- a/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
+++ b/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
@@ -5,6 +5,7 @@ ifdef::context[:parent-context: {context}]
= Using custom TLS certificates
+[role="_abstract"]
{PlatformName} uses X.509 certificate and key pairs to secure traffic both internally between {PlatformNameShort} components and externally for public UI and API connections.
There are two primary ways to manage TLS certificates for your {PlatformNameShort} deployment:
diff --git a/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc b/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc
index fc7542e791..466fef5fb2 100644
--- a/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc
+++ b/downstream/assemblies/topologies/assembly-appendix-topology-resources.adoc
@@ -1,6 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
+
[id="appendix-topology-resources"]
= Additional resources for tested deployment models
+[role="_abstract"]
This appendix provides a reference for the additional resources relevant to the tested deployment models outlined in {TitleTopologies}.
* For additional information about each of the tested topologies described in this document, see the link:https://github.com/ansible/test-topologies/[test-topologies GitHub repo].
diff --git a/downstream/assemblies/topologies/assembly-container-topologies.adoc b/downstream/assemblies/topologies/assembly-container-topologies.adoc
index 34506d2d90..39331669ae 100644
--- a/downstream/assemblies/topologies/assembly-container-topologies.adoc
+++ b/downstream/assemblies/topologies/assembly-container-topologies.adoc
@@ -1,7 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
[id="container-topologies"]
= Container topologies
+[role="_abstract"]
The containerized installer deploys {PlatformNameShort} on {RHEL} by using Podman which runs the platform in containers on host machines. Customers manage the product and infrastructure lifecycle.
//Container growth topology
diff --git a/downstream/assemblies/topologies/assembly-ocp-topologies.adoc b/downstream/assemblies/topologies/assembly-ocp-topologies.adoc
index e4bfd573ba..6b4b8b1aa2 100644
--- a/downstream/assemblies/topologies/assembly-ocp-topologies.adoc
+++ b/downstream/assemblies/topologies/assembly-ocp-topologies.adoc
@@ -1,7 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
[id="ocp-topologies"]
= Operator topologies
+[role="_abstract"]
The {OperatorPlatformNameShort} uses Red Hat OpenShift Operators to deploy {PlatformNameShort} within Red Hat OpenShift. Customers manage the product and infrastructure lifecycle.
[IMPORTANT]
diff --git a/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc b/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc
index 391c04bb42..39a0ce4258 100644
--- a/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc
+++ b/downstream/assemblies/topologies/assembly-overview-tested-deployment-models.adoc
@@ -1,7 +1,10 @@
+:_mod-docs-content-type: ASSEMBLY
+
[id="overview-tested-deployment-models"]
= Overview of tested deployment models
+[role="_abstract"]
Red Hat tests {PlatformNameShort} {PlatformVers} with a defined set of topologies to give you opinionated deployment options. Deploy all components of {PlatformNameShort} so that all features and capabilities are available for use without the need to take further action.
Red Hat tests the installation of {PlatformNameShort} {PlatformVers} based on a defined set of infrastructure topologies or reference architectures. Enterprise organizations can use one of the {EnterpriseTopologyPlural} for production deployments to ensure the highest level of uptime, performance, and continued scalability. Organizations or deployments that are resource constrained can use a {GrowthTopology}.
diff --git a/downstream/assemblies/topologies/assembly-rpm-topologies.adoc b/downstream/assemblies/topologies/assembly-rpm-topologies.adoc
index ee511d71c6..9f44dd858f 100644
--- a/downstream/assemblies/topologies/assembly-rpm-topologies.adoc
+++ b/downstream/assemblies/topologies/assembly-rpm-topologies.adoc
@@ -3,6 +3,7 @@
= RPM topologies
+[role="_abstract"]
The RPM installer deploys {PlatformNameShort} on {RHEL} by using RPMs to install the platform on host machines. Customers manage the product and infrastructure lifecycle.
//RPM growth topology
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-diagnosing-the-problem.adoc b/downstream/assemblies/troubleshooting-aap/assembly-diagnosing-the-problem.adoc
index 357b5051e9..45c3e7e356 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-diagnosing-the-problem.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-diagnosing-the-problem.adoc
@@ -4,6 +4,7 @@
= Diagnosing the problem
+[role="_abstract"]
To start troubleshooting {PlatformNameShort}, use the `must-gather` command on {OCPShort} or the `sos` utility on a {VMBase} to collect configuration and diagnostic information. You can attach the output of these utilities to your support case.
include::troubleshooting-aap/proc-troubleshoot-must-gather.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc
index a5436bf521..4cfaa2473b 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-backup-recovery.adoc
@@ -1,8 +1,11 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-backup-recovery"]
= Backup and recovery
+[role="_abstract"]
+Use this information to troubleshoot backup and recovery.
+
* For information about performing a backup and recovery of {PlatformNameShort}, see link:{URLControllerAdminGuide}/controller-backup-and-restore[Backup and restore] in _{TitleControllerAdminGuide}_.
* For information about troubleshooting backup and recovery for installations of {OperatorPlatformNameShort} on {OCPShort}, see the link:{URLOperatorBackup}/assembly-aap-troubleshoot-backup-recover[Troubleshooting] section in _{TitleOperatorBackup}_.
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc
index 786946dc94..dbd0aad0d1 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-controller.adoc
@@ -1,8 +1,11 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-controller"]
= Resources for troubleshooting {ControllerName}
+[role="_abstract"]
+Use the following resources to troubleshoot {ControllerName}.
+
* For information about troubleshooting {ControllerName}, see link:{URLControllerAdminGuide}/controller-troubleshooting[Troubleshooting {ControllerName}] in _{TitleControllerAdminGuide}_.
* For information about troubleshooting the performance of {ControllerName}, see link:{URLControllerAdminGuide}/assembly-controller-improving-performance#ref-controller-performance-troubleshooting[Performance troubleshooting for {ControllerName}] in _{TitleControllerAdminGuide}_.
\ No newline at end of file
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-execution-environments.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-execution-environments.adoc
index 9e0f40feeb..5258bcfded 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-execution-environments.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-execution-environments.adoc
@@ -1,8 +1,9 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-execution-environments"]
= Execution environments
+[role="_abstract"]
Troubleshoot issues with execution environments.
include::troubleshooting-aap/proc-troubleshoot-use-in-controller.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-installation.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-installation.adoc
index 40a40b586f..7862e6cb29 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-installation.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-installation.adoc
@@ -1,8 +1,9 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-installation"]
= Installation
+[role="_abstract"]
Troubleshoot issues with your installation.
include::troubleshooting-aap/proc-troubleshoot-aap-packages.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc
index 1999ca1215..8947d85b74 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-jobs.adoc
@@ -4,6 +4,7 @@
= Jobs
+[role="_abstract"]
Troubleshoot issues with jobs.
// Michelle - commenting out for now as it refers to upgrade info
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-networking.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-networking.adoc
index 47c34db517..cefdeccca5 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-networking.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-networking.adoc
@@ -1,8 +1,9 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-networking"]
= Networking
+[role="_abstract"]
Troubleshoot networking issues.
include::troubleshooting-aap/proc-troubleshoot-subnet-conflict.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc
index c88e776aca..749fdbc048 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-playbooks.adoc
@@ -1,9 +1,11 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-playbooks"]
= Playbooks
+[role="_abstract"]
You can use {Navigator} to interactively troubleshoot your playbook.
+
For more information about troubleshooting a playbook with {Navigator}, see
link:{URLNavigatorGuide}/assembly-troubleshooting-navigator_ansible-navigator[Troubleshooting Ansible content with {Navigator}]
in the _{TitleNavigatorGuide}_ Guide.
diff --git a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc
index 3a12be1aa0..3e2d6df916 100644
--- a/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc
+++ b/downstream/assemblies/troubleshooting-aap/assembly-troubleshoot-upgrade.adoc
@@ -1,8 +1,9 @@
-
+:_mod-docs-content-type: ASSEMBLY
[id="troubleshoot-upgrade"]
= Upgrading
-Troubleshoot issues when upgrading to {PlatformNameShort} 2.5.
+[role="_abstract"]
+Troubleshoot issues when upgrading to {PlatformNameShort} {PlatformVers}.
include::troubleshooting-aap/proc-troubleshoot-upgrade-issues.adoc[leveloffset=+1]
diff --git a/downstream/modules/aap-migration/con-artifact-structure.adoc b/downstream/modules/aap-migration/con-artifact-structure.adoc
index 0ec6612ca5..9d6b3035c0 100644
--- a/downstream/modules/aap-migration/con-artifact-structure.adoc
+++ b/downstream/modules/aap-migration/con-artifact-structure.adoc
@@ -3,6 +3,7 @@
[id="artifact-structure"]
= Artifact structure
+[role="_abstract"]
The migration artifact serves as a comprehensive package containing all necessary components to successfully transfer your {PlatformNameShort} deployment.
Structure the artifact as follows:
diff --git a/downstream/modules/aap-migration/con-containerized-to-managed-prerequisites.adoc b/downstream/modules/aap-migration/con-containerized-to-managed-prerequisites.adoc
index 286b40bf0c..92ffbcc2fe 100644
--- a/downstream/modules/aap-migration/con-containerized-to-managed-prerequisites.adoc
+++ b/downstream/modules/aap-migration/con-containerized-to-managed-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="containerized-to-managed-prerequisites"]
= Prerequisites for migrating from a container-based deployment to a Managed {PlatformNameShort} deployment
+[role="_abstract"]
Before migrating from a container-based deployment to a Managed {PlatformNameShort} deployment, ensure that you meet the following prerequisites:
* You have a source container-based deployment of {PlatformNameShort}.
diff --git a/downstream/modules/aap-migration/con-containerized-to-ocp-prerequisites.adoc b/downstream/modules/aap-migration/con-containerized-to-ocp-prerequisites.adoc
index eace6038e1..82b5350ea5 100644
--- a/downstream/modules/aap-migration/con-containerized-to-ocp-prerequisites.adoc
+++ b/downstream/modules/aap-migration/con-containerized-to-ocp-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="containerized-to-ocp-prerequisites"]
= Prerequisites for migrating from a container-based deployment to an {OCPShort} deployment
+[role="_abstract"]
Before migrating from a container-based deployment to an {OCPShort} deployment, ensure that you meet the following prerequisites:
* You have a source container-based deployment of {PlatformNameShort}.
diff --git a/downstream/modules/aap-migration/con-introduction-and-objectives.adoc b/downstream/modules/aap-migration/con-introduction-and-objectives.adoc
index c9854e14d0..02f656213e 100644
--- a/downstream/modules/aap-migration/con-introduction-and-objectives.adoc
+++ b/downstream/modules/aap-migration/con-introduction-and-objectives.adoc
@@ -3,6 +3,7 @@
[id="introduction-and-objectives"]
= Introduction and objectives
+[role="_abstract"]
This document outlines the necessary steps and considerations for migrating between different {PlatformNameShort} deployment types for {PlatformNameShort} {PlatformVers}. Specifically, it focuses on these migration paths:
[options="header"]
diff --git a/downstream/modules/aap-migration/con-manifest-file.adoc b/downstream/modules/aap-migration/con-manifest-file.adoc
index 501b7b8d19..c5f291fcf4 100644
--- a/downstream/modules/aap-migration/con-manifest-file.adoc
+++ b/downstream/modules/aap-migration/con-manifest-file.adoc
@@ -3,6 +3,7 @@
[id="manifest-file"]
= Manifest file
+[role="_abstract"]
The `manifest.yml` file serves as the primary metadata document for the migration artifact, containing critical versioning and component information from your source environment.
Structure the manifest as follows:
diff --git a/downstream/modules/aap-migration/con-migration-process-overview.adoc b/downstream/modules/aap-migration/con-migration-process-overview.adoc
index 5c5b1eb088..9b87c8b75a 100644
--- a/downstream/modules/aap-migration/con-migration-process-overview.adoc
+++ b/downstream/modules/aap-migration/con-migration-process-overview.adoc
@@ -8,6 +8,7 @@
You can only migrate to a different installation type of the same {PlatformNameShort} version. For example you can migrate from RPM version {PlatformVers} to containerized {PlatformVers}, but not from RPM version 2.4 to containerized {PlatformVers}.
====
+[role="_abstract"]
The migration between {PlatformNameShort} installation types follows this general workflow:
. Prepare and assess the source environment - Prepare and assess the existing source environment for migration.
diff --git a/downstream/modules/aap-migration/con-out-of-scope.adoc b/downstream/modules/aap-migration/con-out-of-scope.adoc
index bb86c3b744..487fc33ed4 100644
--- a/downstream/modules/aap-migration/con-out-of-scope.adoc
+++ b/downstream/modules/aap-migration/con-out-of-scope.adoc
@@ -3,6 +3,7 @@
[id="out-of-scope"]
= Out of scope
+[role="_abstract"]
This guide is focused on the core components of {PlatformNameShort}. The following items are currently out of scope for the migration processes described in this document:
* {EDAName}: Configuration and content for {EDAName} must be manually recreated in the target environment.
diff --git a/downstream/modules/aap-migration/con-rpm-to-containerized-prerequisites.adoc b/downstream/modules/aap-migration/con-rpm-to-containerized-prerequisites.adoc
index e63c6d4587..77ab327864 100644
--- a/downstream/modules/aap-migration/con-rpm-to-containerized-prerequisites.adoc
+++ b/downstream/modules/aap-migration/con-rpm-to-containerized-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="rpm-to-containerized-prerequisites"]
= Prerequisites for migrating from an RPM deployment to a containerized deployment
+[role="_abstract"]
Before migrating from an RPM-based deployment to a container-based deployment, ensure you meet the following prerequisites:
* You have a source RPM-based deployment of {PlatformNameShort}.
diff --git a/downstream/modules/aap-migration/con-rpm-to-managed-prerequisites.adoc b/downstream/modules/aap-migration/con-rpm-to-managed-prerequisites.adoc
index 24234723cc..1d04f66ac9 100644
--- a/downstream/modules/aap-migration/con-rpm-to-managed-prerequisites.adoc
+++ b/downstream/modules/aap-migration/con-rpm-to-managed-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="rpm-to-managed-prerequisites"]
= Prerequisites for migrating from an RPM-based deployment to a Managed {PlatformNameShort} deployment
+[role="_abstract"]
Before migrating from an RPM-based deployment to a Managed {PlatformNameShort} deployment, ensure you meet the following prerequisites:
* You have a source RPM-based deployment of {PlatformNameShort}.
diff --git a/downstream/modules/aap-migration/con-rpm-to-ocp-prerequisites.adoc b/downstream/modules/aap-migration/con-rpm-to-ocp-prerequisites.adoc
index 56ab73f851..3055bdcc64 100644
--- a/downstream/modules/aap-migration/con-rpm-to-ocp-prerequisites.adoc
+++ b/downstream/modules/aap-migration/con-rpm-to-ocp-prerequisites.adoc
@@ -3,6 +3,7 @@
[id="rpm-to-ocp-prerequisites"]
= Prerequisites for migrating from an RPM-based deployment to an {OCPShort} deployment
+[role="_abstract"]
Before migrating from an RPM-based deployment to an {OCPShort} deployment, ensure you meet the following prerequisites:
* You have a source RPM-based deployment of {PlatformNameShort}.
diff --git a/downstream/modules/aap-migration/con-secrets-file.adoc b/downstream/modules/aap-migration/con-secrets-file.adoc
index 2a23dadf0e..1acc35a413 100644
--- a/downstream/modules/aap-migration/con-secrets-file.adoc
+++ b/downstream/modules/aap-migration/con-secrets-file.adoc
@@ -3,6 +3,7 @@
[id="secrets-file"]
= Secrets file
+[role="_abstract"]
The `secrets.yml` file in the migration artifact includes essential Django `SECRET_KEY` values and other sensitive data required for authentication between services.
Structure the secrets file as follows:
@@ -20,4 +21,4 @@ hub_db_fields_encryption_key:
[NOTE]
====
Ensure the `secrets.yml` file is encrypted kept in a secure location.
-====
\ No newline at end of file
+====
diff --git a/downstream/modules/aap-migration/proc-containerized-post-import.adoc b/downstream/modules/aap-migration/proc-containerized-post-import.adoc
index 30a198f567..6841fc1193 100644
--- a/downstream/modules/aap-migration/proc-containerized-post-import.adoc
+++ b/downstream/modules/aap-migration/proc-containerized-post-import.adoc
@@ -3,6 +3,7 @@
[id="containerized-post-import"]
= Reconciling the target environment post-import
+[role="_abstract"]
Perform the following post-import reconciliation steps to ensure your target environment is fully functional and correctly configured.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-containerized-target-import.adoc b/downstream/modules/aap-migration/proc-containerized-target-import.adoc
index 0133260beb..cf034c2e00 100644
--- a/downstream/modules/aap-migration/proc-containerized-target-import.adoc
+++ b/downstream/modules/aap-migration/proc-containerized-target-import.adoc
@@ -3,6 +3,7 @@
[id="containerized-target-import"]
= Importing the migration content to the target environment
+[role="_abstract"]
To import your migration content into the target environment, stop the containerized services, import the database dumps, and then restart the services.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-containerized-target-prep.adoc b/downstream/modules/aap-migration/proc-containerized-target-prep.adoc
index 0f9eda990b..a53c458558 100644
--- a/downstream/modules/aap-migration/proc-containerized-target-prep.adoc
+++ b/downstream/modules/aap-migration/proc-containerized-target-prep.adoc
@@ -3,6 +3,7 @@
[id="containerized-target-prep"]
= Preparing and assessing the target environment
+[role="_abstract"]
To prepare your target environment, perform the following steps.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-containerized-validation.adoc b/downstream/modules/aap-migration/proc-containerized-validation.adoc
index 81f242c410..75cbd5a9eb 100644
--- a/downstream/modules/aap-migration/proc-containerized-validation.adoc
+++ b/downstream/modules/aap-migration/proc-containerized-validation.adoc
@@ -3,6 +3,7 @@
[id="containerized-validation"]
= Validating the target environment
+[role="_abstract"]
After completing the migration, validate your target environment to ensure all components are functional and operating as expected.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-managed-post-import.adoc b/downstream/modules/aap-migration/proc-managed-post-import.adoc
index 70f53a6398..11060a5aee 100644
--- a/downstream/modules/aap-migration/proc-managed-post-import.adoc
+++ b/downstream/modules/aap-migration/proc-managed-post-import.adoc
@@ -3,6 +3,7 @@
[id="managed-post-import"]
= Reconciling the target environment post-migration
+[role="_abstract"]
After a successful migration, perform the following tasks:
.Procedure
diff --git a/downstream/modules/aap-migration/proc-managed-target-migration.adoc b/downstream/modules/aap-migration/proc-managed-target-migration.adoc
index f340f17b91..3c16de889e 100644
--- a/downstream/modules/aap-migration/proc-managed-target-migration.adoc
+++ b/downstream/modules/aap-migration/proc-managed-target-migration.adoc
@@ -3,6 +3,9 @@
[id="managed-target-migration"]
= Migrating to Managed {PlatformNameShort}
+[role="_abstract"]
+Follow this procedure to migrate to Managed {PlatformNameShort}.
+
.Prerequisites
* You have a migration artifact from your source environment.
diff --git a/downstream/modules/aap-migration/proc-ocp-post-import.adoc b/downstream/modules/aap-migration/proc-ocp-post-import.adoc
index faf58635f4..91db99d7ad 100644
--- a/downstream/modules/aap-migration/proc-ocp-post-import.adoc
+++ b/downstream/modules/aap-migration/proc-ocp-post-import.adoc
@@ -3,6 +3,7 @@
[id="ocp-post-import"]
= Reconciling the target environment post-import
+[role="_abstract"]
After importing your migration artifact, perform the following steps to reconcile your target environment.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-ocp-target-import.adoc b/downstream/modules/aap-migration/proc-ocp-target-import.adoc
index 9ca5aa4241..a45f72ce03 100644
--- a/downstream/modules/aap-migration/proc-ocp-target-import.adoc
+++ b/downstream/modules/aap-migration/proc-ocp-target-import.adoc
@@ -3,6 +3,7 @@
[id="ocp-target-import"]
= Importing the migration content to the target environment
+[role="_abstract"]
To import your environment, scale down {PlatformNameShort} components, restore databases, replace encryption secrets, and scale services back up.
[NOTE]
diff --git a/downstream/modules/aap-migration/proc-ocp-target-prep.adoc b/downstream/modules/aap-migration/proc-ocp-target-prep.adoc
index b185703ea4..45e548457c 100644
--- a/downstream/modules/aap-migration/proc-ocp-target-prep.adoc
+++ b/downstream/modules/aap-migration/proc-ocp-target-prep.adoc
@@ -3,6 +3,7 @@
[id="ocp-target-prep"]
= Preparing and assessing the target environment
+[role="_abstract"]
To prepare and assess your target environment, perform the following steps.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-ocp-validation.adoc b/downstream/modules/aap-migration/proc-ocp-validation.adoc
index c48b56d9f4..76aff07010 100644
--- a/downstream/modules/aap-migration/proc-ocp-validation.adoc
+++ b/downstream/modules/aap-migration/proc-ocp-validation.adoc
@@ -3,6 +3,7 @@
[id="ocp-validation"]
= Validating the target environment
+[role="_abstract"]
To validate your migrated environment, perform the following steps.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-rpm-environment-source-prep.adoc b/downstream/modules/aap-migration/proc-rpm-environment-source-prep.adoc
index d7f0a99882..1d49180a8b 100644
--- a/downstream/modules/aap-migration/proc-rpm-environment-source-prep.adoc
+++ b/downstream/modules/aap-migration/proc-rpm-environment-source-prep.adoc
@@ -3,6 +3,7 @@
[id="rpm-environment-source-prep"]
= Preparing and assessing the source environment
+[role="_abstract"]
Before beginning your migration, document your current RPM deployment. This documentation serves as a reference throughout the migration process and is critical for properly configuring your target environment.
.Procedure
diff --git a/downstream/modules/aap-migration/proc-rpm-source-environment-export.adoc b/downstream/modules/aap-migration/proc-rpm-source-environment-export.adoc
index e400c402c0..96e9b49e0b 100644
--- a/downstream/modules/aap-migration/proc-rpm-source-environment-export.adoc
+++ b/downstream/modules/aap-migration/proc-rpm-source-environment-export.adoc
@@ -3,6 +3,7 @@
[id="rpm-source-environment-export"]
= Exporting the source environment
+[role="_abstract"]
From your source environment, export the data and configurations needed for migration.
.Procedure
diff --git a/downstream/modules/aap-migration/ref-migration-artifact-checklist.adoc b/downstream/modules/aap-migration/ref-migration-artifact-checklist.adoc
index d067be57bd..f9acaacacc 100644
--- a/downstream/modules/aap-migration/ref-migration-artifact-checklist.adoc
+++ b/downstream/modules/aap-migration/ref-migration-artifact-checklist.adoc
@@ -3,6 +3,7 @@
[id="migration-artifact-checklist"]
= Migration artifact creation checklist
+[role="_abstract"]
Use this checklist to verify the migration artifact.
* Database dumps: Include complete database dumps for each component.
diff --git a/downstream/modules/platform/con-certs-per-service-considerations.adoc b/downstream/modules/platform/con-certs-per-service-considerations.adoc
index cb9634540c..f8aafb1654 100644
--- a/downstream/modules/platform/con-certs-per-service-considerations.adoc
+++ b/downstream/modules/platform/con-certs-per-service-considerations.adoc
@@ -3,6 +3,7 @@
[id="certs-per-service-considerations"]
= Considerations for certificates provided per service
+[role="_abstract"]
When providing custom TLS certificates for each individual service, consider the following:
* It is possible to provide unique certificates per host. This requires defining the specific `_tls_cert` and `_tls_key` variables in your inventory file as shown in the earlier inventory file example.
diff --git a/downstream/modules/platform/con-installer-generated-certs.adoc b/downstream/modules/platform/con-installer-generated-certs.adoc
index 5713f0ef99..e63697f81b 100644
--- a/downstream/modules/platform/con-installer-generated-certs.adoc
+++ b/downstream/modules/platform/con-installer-generated-certs.adoc
@@ -3,6 +3,7 @@
[id="installer-generated-certificates"]
= {PlatformNameShort} generated certificates
+[role="_abstract"]
By default, the installation program creates a self-signed Certificate Authority (CA) and uses it to generate self-signed TLS certificates for all {PlatformNameShort} services. The self-signed CA certificate and key are generated on one node under the `~/aap/tls/` directory and copied to the same location on all other nodes. This CA is valid for 10 years after the initial creation date.
Self-signed certificates are not part of any public chain of trust. The installation program creates a certificate truststore that includes the self-signed CA certificate under `~/aap/tls/extracted/` and bind-mounts that directory to each {PlatformNameShort} service container under `/etc/pki/ca-trust/extracted/`. This allows each {PlatformNameShort} component to validate the self-signed certificates of the other {PlatformNameShort} services. The CA certificate can also be added to the truststore of other systems or browsers as needed.
diff --git a/downstream/modules/platform/con-receptor-cert-considerations.adoc b/downstream/modules/platform/con-receptor-cert-considerations.adoc
index 328172b6e1..2449a6e778 100644
--- a/downstream/modules/platform/con-receptor-cert-considerations.adoc
+++ b/downstream/modules/platform/con-receptor-cert-considerations.adoc
@@ -3,6 +3,7 @@
[id="receptor-certificate-considerations"]
= Receptor certificate considerations
+[role="_abstract"]
When using a custom certificate for Receptor nodes, the certificate requires the `otherName` field specified in the Subject Alternative Name (SAN) of the certificate with the value `1.3.6.1.4.1.2312.19.1`. For more information, see link:https://ansible.readthedocs.io/projects/receptor/en/latest/user_guide/tls.html#above-the-mesh-tls[Above the mesh TLS].
Receptor does not support the usage of wildcard certificates. Additionally, each Receptor certificate must have the host FQDN specified in its SAN for TLS hostname validation to be correctly performed.
diff --git a/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc b/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc
index 0ccb5fce07..76cdb66fb9 100644
--- a/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc
+++ b/downstream/modules/platform/proc-add-eda-safe-plugin-var.adoc
@@ -1,10 +1,10 @@
:_mod-docs-content-type: PROCEDURE
-
[id="proc-add-eda-safe-plugin-var"]
= Adding a safe plugin variable to {EDAcontroller}
+[role="_abstract"]
When using `redhat.insights_eda` or similar plugins to run rulebook activations in {EDAcontroller}, you must add a safe plugin variable to a directory in {PlatformNameShort}. This ensures connection between {EDAcontroller} and the source plugin, and displays port mappings correctly.
.Procedure
diff --git a/downstream/modules/platform/proc-configure-ext-db-mtls.adoc b/downstream/modules/platform/proc-configure-ext-db-mtls.adoc
index 665ff18b3a..d6f5b0ac40 100644
--- a/downstream/modules/platform/proc-configure-ext-db-mtls.adoc
+++ b/downstream/modules/platform/proc-configure-ext-db-mtls.adoc
@@ -3,6 +3,7 @@
[id="configure-ext-db-mtls"]
= Optional: configuring mutual TLS (mTLS) authentication for an external database
+[role="_abstract"]
mTLS authentication is disabled by default. To configure each component's database with mTLS authentication, add the following variables to your inventory file under the `[all:vars]` group and ensure each component has a different TLS certificate and key:
.Procedure
diff --git a/downstream/modules/platform/proc-configure-haproxy-load-balancer.adoc b/downstream/modules/platform/proc-configure-haproxy-load-balancer.adoc
index 6325bb3167..75b54ef3b5 100644
--- a/downstream/modules/platform/proc-configure-haproxy-load-balancer.adoc
+++ b/downstream/modules/platform/proc-configure-haproxy-load-balancer.adoc
@@ -3,6 +3,7 @@
[id="configuring-haproxy-load-balancer"]
= Configuring a HAProxy load balancer
+[role="_abstract"]
To configure a HAProxy load balancer in front of {Gateway} with a custom CA cert, set the following inventory file variables under the `[all:vars]` group:
----
diff --git a/downstream/modules/platform/proc-configure-hub-azure-storage.adoc b/downstream/modules/platform/proc-configure-hub-azure-storage.adoc
index 2d59dcd46a..dba5682986 100644
--- a/downstream/modules/platform/proc-configure-hub-azure-storage.adoc
+++ b/downstream/modules/platform/proc-configure-hub-azure-storage.adoc
@@ -3,6 +3,7 @@
[id="configure-hub-azure-storage"]
= Configuring Azure Blob Storage for {HubName}
+[role="_abstract"]
Azure Blob storage is a type of object storage that is supported in containerized installations. When using an Azure blob storage backend, set `hub_storage_backend` to `azure`. The Azure container needs to exist before running the installation program.
.Procedure
diff --git a/downstream/modules/platform/proc-configure-hub-nfs-storage.adoc b/downstream/modules/platform/proc-configure-hub-nfs-storage.adoc
index 556972b456..fc6ad478ee 100644
--- a/downstream/modules/platform/proc-configure-hub-nfs-storage.adoc
+++ b/downstream/modules/platform/proc-configure-hub-nfs-storage.adoc
@@ -3,6 +3,7 @@
[id="configure-hub-nfs-storage"]
= Configuring Network File System (NFS) storage for {HubName}
+[role="_abstract"]
NFS is a type of shared storage that is supported in containerized installations. Shared storage is required when installing more than one instance of {HubName} with a `file` storage backend. When installing a single instance of the {HubName}, shared storage is optional.
.Procedure
diff --git a/downstream/modules/platform/proc-configure-hub-s3-storage.adoc b/downstream/modules/platform/proc-configure-hub-s3-storage.adoc
index e8cde7ead0..1514f8d529 100644
--- a/downstream/modules/platform/proc-configure-hub-s3-storage.adoc
+++ b/downstream/modules/platform/proc-configure-hub-s3-storage.adoc
@@ -3,6 +3,7 @@
[id="configure-hub-s3-storage"]
= Configuring Amazon S3 storage for {HubName}
+[role="_abstract"]
Amazon S3 storage is a type of object storage that is supported in containerized installations. When using an AWS S3 storage backend, set `hub_storage_backend` to `s3`. The AWS S3 bucket needs to exist before running the installation program.
.Procedure
diff --git a/downstream/modules/platform/proc-containerized-troubleshoot-gathering-logs.adoc b/downstream/modules/platform/proc-containerized-troubleshoot-gathering-logs.adoc
index caa829f82d..3cdb5a440d 100644
--- a/downstream/modules/platform/proc-containerized-troubleshoot-gathering-logs.adoc
+++ b/downstream/modules/platform/proc-containerized-troubleshoot-gathering-logs.adoc
@@ -3,6 +3,7 @@
= Gathering {PlatformNameShort} logs
+[role="_abstract"]
With the `sos` utility, you can collect configuration, diagnostic, and troubleshooting data, and give those files to Red Hat Technical Support. An `sos` report is a common starting point for Red Hat technical support engineers when performing analysis of a service request for {PlatformNameShort}.
You can collect an `sos` report for each host in your containerized {PlatformNameShort} deployment by running the `log_gathering` playbook with the appropriate parameters.
@@ -31,19 +32,15 @@ $ ansible-playbook -i ansible.containerized_installer.l
[options="header"]
|====
| Parameter name | Description | Default
-
| `target_sos_directory`
| Used to change the default location for the `sos` report files.
| `/tmp` directory of the current server.
-
| `case_number`
| Specifies the support case number if relevant to the log gathering.
|
-
| `clean`
| Obfuscates sensitive data that might be present on the `sos` report.
| `false`
-
| `upload`
| Automatically uploads the `sos` report data to Red Hat.
| `false`
diff --git a/downstream/modules/platform/proc-downloading-containerized-aap.adoc b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
index 0e059a3775..2a96ac0dc0 100644
--- a/downstream/modules/platform/proc-downloading-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-downloading-containerized-aap.adoc
@@ -4,6 +4,7 @@
= Downloading {PlatformNameShort}
+[role="_abstract"]
Choose the installation program you need based on your {RHEL} environment internet connectivity and download the installation program to your {RHEL} host.
.Prerequisites
diff --git a/downstream/modules/platform/proc-enable-hstore-extension.adoc b/downstream/modules/platform/proc-enable-hstore-extension.adoc
index eaf878e606..7c3eabe6e1 100644
--- a/downstream/modules/platform/proc-enable-hstore-extension.adoc
+++ b/downstream/modules/platform/proc-enable-hstore-extension.adoc
@@ -4,6 +4,7 @@
= Enabling the hstore extension for the {HubName} PostgreSQL database
+[role="_abstract"]
The database migration script uses `hstore` fields to store information, therefore the `hstore` extension must be enabled in the {HubName} PostgreSQL database.
This process is automatic when using the {PlatformNameShort} installer and a managed PostgreSQL server.
diff --git a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
index 0840347165..be1320f250 100644
--- a/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
+++ b/downstream/modules/platform/proc-enabling-automation-hub-collection-and-container-signing.adoc
@@ -3,6 +3,7 @@
[id="enabling-automation-hub-collection-and-container-signing_{context}"]
= Enabling automation content collection and container signing
+[role="_abstract"]
Automation content signing is disabled by default. To enable it, the following installation variables are required in the inventory file:
[source,yaml]
diff --git a/downstream/modules/platform/proc-installing-containerized-aap.adoc b/downstream/modules/platform/proc-installing-containerized-aap.adoc
index fc182dc49a..cfb8ddbdbe 100644
--- a/downstream/modules/platform/proc-installing-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-installing-containerized-aap.adoc
@@ -4,6 +4,7 @@
= Installing containerized {PlatformNameShort}
+[role="_abstract"]
After you prepare the {RHEL} host, download {PlatformNameShort}, and configure the inventory file, run the `install` playbook to install containerized {PlatformNameShort}.
.Prerequisites
diff --git a/downstream/modules/platform/proc-preparing-the-managed-nodes-for-containerized-installation.adoc b/downstream/modules/platform/proc-preparing-the-managed-nodes-for-containerized-installation.adoc
index 34a0191adf..52585fb7cd 100644
--- a/downstream/modules/platform/proc-preparing-the-managed-nodes-for-containerized-installation.adoc
+++ b/downstream/modules/platform/proc-preparing-the-managed-nodes-for-containerized-installation.adoc
@@ -4,6 +4,7 @@
= Preparing the managed nodes for containerized installation
+[role="_abstract"]
Managed nodes, also referred to as hosts, are the devices that {PlatformNameShort} is configured to manage.
To ensure a consistent and secure setup of containerized {PlatformNameShort}, create a dedicated user on each host. {PlatformNameShort} connects as this user to run tasks on the host.
diff --git a/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc b/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc
index e9ff12f482..6a6b079517 100644
--- a/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc
+++ b/downstream/modules/platform/proc-preparing-the-rhel-host-for-containerized-installation.adoc
@@ -4,6 +4,7 @@
= Preparing the {RHEL} host for containerized installation
+[role="_abstract"]
Containerized {PlatformNameShort} runs the component services as Podman based containers on top of a {RHEL} host. Prepare the {RHEL} host to ensure a successful installation.
.Procedure
diff --git a/downstream/modules/platform/proc-provide-custom-ca-cert.adoc b/downstream/modules/platform/proc-provide-custom-ca-cert.adoc
index 3db3fc75d1..e8b762e951 100644
--- a/downstream/modules/platform/proc-provide-custom-ca-cert.adoc
+++ b/downstream/modules/platform/proc-provide-custom-ca-cert.adoc
@@ -3,6 +3,7 @@
[id="providing-a-custom-ca-certificate"]
= Providing a custom CA certificate
+[role="_abstract"]
When you manually provide TLS certificates, those certificates might be signed by a custom CA. Provide a custom CA certificate to ensure proper authentication and secure communication within your environment. If you have multiple custom CA certificates, you must merge them into a single file.
.Procedure
diff --git a/downstream/modules/platform/proc-provide-custom-tls-certs-per-service.adoc b/downstream/modules/platform/proc-provide-custom-tls-certs-per-service.adoc
index 348f8b0de6..29ee2e2ec5 100644
--- a/downstream/modules/platform/proc-provide-custom-tls-certs-per-service.adoc
+++ b/downstream/modules/platform/proc-provide-custom-tls-certs-per-service.adoc
@@ -3,6 +3,7 @@
[id="proc-provide-custom-tls-certs-per-service"]
= Providing custom TLS certificates for each service
+[role="_abstract"]
Use this method if your organization manages TLS certificates outside of {PlatformNameShort} and requires manual provisioning.
.Procedure
diff --git a/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc b/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc
index 06a712e428..e2dc8a06d1 100644
--- a/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-reinstalling-containerized-aap.adoc
@@ -4,7 +4,6 @@
= Reinstalling containerized {PlatformNameShort}
[role="_abstract"]
-
To reinstall a containerized deployment after uninstalling and preserving the database, follow the steps in link:{URLContainerizedInstall}/aap-containerized-installation#installing-containerized-aap[Installing containerized {PlatformNameShort}] and include the existing secret key value in the playbook command:
----
diff --git a/downstream/modules/platform/proc-set-registry-username-password.adoc b/downstream/modules/platform/proc-set-registry-username-password.adoc
index e248ee27db..7bff6a9224 100644
--- a/downstream/modules/platform/proc-set-registry-username-password.adoc
+++ b/downstream/modules/platform/proc-set-registry-username-password.adoc
@@ -4,6 +4,7 @@
= Setting registry_username and registry_password
+[role="_abstract"]
When using the `registry_username` and `registry_password` variables for an online non-bundled installation, you need to create a new registry service account.
Registry service accounts are named tokens that can be used in environments where credentials will be shared, such as deployment systems.
diff --git a/downstream/modules/platform/proc-setup-ext-db-with-admin-creds.adoc b/downstream/modules/platform/proc-setup-ext-db-with-admin-creds.adoc
index 8a3c2fe975..de784ff3b0 100644
--- a/downstream/modules/platform/proc-setup-ext-db-with-admin-creds.adoc
+++ b/downstream/modules/platform/proc-setup-ext-db-with-admin-creds.adoc
@@ -3,6 +3,7 @@
[id="setup-ext-db-with-admin-creds"]
= Setting up an external database with PostgreSQL admin credentials
+[role="_abstract"]
If you have PostgreSQL admin credentials, you can supply them in the inventory file and the installation program creates the PostgreSQL users and databases for each component for you. The PostgreSQL admin account must have `SUPERUSER` privileges.
.Procedure
diff --git a/downstream/modules/platform/proc-setup-ext-db-without-admin-creds.adoc b/downstream/modules/platform/proc-setup-ext-db-without-admin-creds.adoc
index 80225bd41e..6eef822ea3 100644
--- a/downstream/modules/platform/proc-setup-ext-db-without-admin-creds.adoc
+++ b/downstream/modules/platform/proc-setup-ext-db-without-admin-creds.adoc
@@ -3,6 +3,7 @@
[id="setup-ext-db-without-admin-creds"]
= Setting up an external database without PostgreSQL admin credentials
+[role="_abstract"]
If you do not have PostgreSQL admin credentials, then PostgreSQL users and databases need to be created for each component ({Gateway}, {ControllerName}, {HubName}, and {EDAName}) before running the installation program.
.Procedure
diff --git a/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc b/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc
index d900b7f72c..aef7669c0d 100644
--- a/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc
+++ b/downstream/modules/platform/proc-uninstalling-containerized-aap.adoc
@@ -3,6 +3,7 @@
[id="uninstalling-containerized-aap"]
= Uninstalling containerized {PlatformNameShort}
+[role="_abstract"]
Uninstall your {ContainerBase} of {PlatformNameShort}.
.Prerequisites
diff --git a/downstream/modules/platform/proc-update-aap-container.adoc b/downstream/modules/platform/proc-update-aap-container.adoc
index 7a8b356968..03513bcdc5 100644
--- a/downstream/modules/platform/proc-update-aap-container.adoc
+++ b/downstream/modules/platform/proc-update-aap-container.adoc
@@ -3,6 +3,7 @@
= Updating containerized {PlatformNameShort}
+[role="_abstract"]
Perform an upgrade of containerized {PlatformNameShort}.
.Prerequisites
diff --git a/downstream/modules/platform/proc-use-custom-ca-certs.adoc b/downstream/modules/platform/proc-use-custom-ca-certs.adoc
index ff193bd76e..b425367adb 100644
--- a/downstream/modules/platform/proc-use-custom-ca-certs.adoc
+++ b/downstream/modules/platform/proc-use-custom-ca-certs.adoc
@@ -3,6 +3,7 @@
[id="use-custom-ca-certs"]
= Using a custom CA to generate all TLS certificates
+[role="_abstract"]
Use this method when you want {PlatformNameShort} to generate all of the certificates, but you want them signed by a custom CA rather than the default self-signed certificates.
.Procedure
diff --git a/downstream/modules/platform/ref-adding-execution-nodes.adoc b/downstream/modules/platform/ref-adding-execution-nodes.adoc
index 6fae512a48..2b7aca1f97 100644
--- a/downstream/modules/platform/ref-adding-execution-nodes.adoc
+++ b/downstream/modules/platform/ref-adding-execution-nodes.adoc
@@ -1,11 +1,9 @@
-:_newdoc-version: 2.15.1
-:_template-generated: 2024-01-12
-
:_mod-docs-content-type: REFERENCE
[id="adding-execution-nodes_{context}"]
= Adding execution nodes
+[role="_abstract"]
Containerized {PlatformNameShort} can deploy remote execution nodes.
You can define remote execution nodes in the `[execution_nodes]` group of your inventory file:
diff --git a/downstream/modules/platform/ref-ansible-inventory-variables.adoc b/downstream/modules/platform/ref-ansible-inventory-variables.adoc
index 52d4eaf011..0834748f32 100644
--- a/downstream/modules/platform/ref-ansible-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-ansible-inventory-variables.adoc
@@ -4,6 +4,7 @@
= Ansible variables
+[role="_abstract"]
The following variables control how {PlatformNameShort} interacts with remote hosts.
.Ansible variables
diff --git a/downstream/modules/platform/ref-configuring-inventory-file.adoc b/downstream/modules/platform/ref-configuring-inventory-file.adoc
index f2a95d0caa..69cdcd6de0 100644
--- a/downstream/modules/platform/ref-configuring-inventory-file.adoc
+++ b/downstream/modules/platform/ref-configuring-inventory-file.adoc
@@ -3,6 +3,7 @@
[id="configuring-inventory-file"]
= Configuring the inventory file
+[role="_abstract"]
You can control the installation of {PlatformNameShort} with inventory files. Inventory files define the information needed to customize the installation. For example, host details, certificate details, and various component-specific settings.
Example inventory files are available in this document that you can copy and change to quickly get started.
diff --git a/downstream/modules/platform/ref-cont-aap-system-requirements.adoc b/downstream/modules/platform/ref-cont-aap-system-requirements.adoc
index 9a746d1bad..9502c3c085 100644
--- a/downstream/modules/platform/ref-cont-aap-system-requirements.adoc
+++ b/downstream/modules/platform/ref-cont-aap-system-requirements.adoc
@@ -4,6 +4,7 @@
= System requirements
+[role="_abstract"]
Use this information when planning your installation of containerized {PlatformNameShort}.
== Prerequisites
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc
index 2dc10a8ec6..8f4211e9a8 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-config.adoc
@@ -3,6 +3,9 @@
= Troubleshooting containerized {PlatformNameShort} configuration
+[role="_abstract"]
+Use this information to troubleshoot your containerized {PlatformNameShort} configuration.
+
*Sometimes the post install for seeding my {PlatformNameShort} content errors out*
This could manifest itself as output similar to this:
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
index fb8d42e8f5..b871ef0ef4 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
@@ -3,6 +3,7 @@
= Diagnosing the problem
+[role="_abstract"]
For general container-based troubleshooting, you can inspect the container logs for any running service to help troubleshoot underlying issues.
*Identifying the running containers*
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc
index b5e829eb53..2dd4ef0ddd 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-install.adoc
@@ -3,6 +3,9 @@
= Troubleshooting containerized {PlatformNameShort} installation
+[role="_abstract"]
+Use this information to troubleshoot your containerized installation of {PlatformNameShort}.
+
*The installation takes a long time, or has errors, what should I check?*
. Ensure your system meets the minimum requirements as outlined in link:{URLContainerizedInstall}/aap-containerized-installation#system-requirements[System requirements]. Factors such as improper storage choices and high latency when distributing across many hosts will all have an impact on installation time.
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
index bb56363f43..461e76a0cd 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-ref.adoc
@@ -4,6 +4,9 @@
= Containerized {PlatformNameShort} reference
+[role="_abstract"]
+Use this information to understand the architecture for your containerized {PlatformNameShort} deployment.
+
*Can you give details of the architecture for the {PlatformNameShort} containerized design?*
We use as much of the underlying native {RHEL} technology as possible. Podman is used for the container runtime and management of services.
diff --git a/downstream/modules/platform/ref-controller-variables.adoc b/downstream/modules/platform/ref-controller-variables.adoc
index ee3da4d132..321439bfea 100644
--- a/downstream/modules/platform/ref-controller-variables.adoc
+++ b/downstream/modules/platform/ref-controller-variables.adoc
@@ -4,6 +4,9 @@
= {ControllerNameStart} variables
+[role="_abstract"]
+Inventory file variables for {ControllerName}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-database-inventory-variables.adoc b/downstream/modules/platform/ref-database-inventory-variables.adoc
index cc3b0e41a3..1073fa1d06 100644
--- a/downstream/modules/platform/ref-database-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-database-inventory-variables.adoc
@@ -4,6 +4,9 @@
= Database variables
+[role="_abstract"]
+Inventory file variables for the database used with {PlatformNameShort}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-eda-controller-variables.adoc b/downstream/modules/platform/ref-eda-controller-variables.adoc
index a0f8b989c5..dd30231982 100644
--- a/downstream/modules/platform/ref-eda-controller-variables.adoc
+++ b/downstream/modules/platform/ref-eda-controller-variables.adoc
@@ -4,6 +4,9 @@
= {EDAcontroller} variables
+[role="_abstract"]
+Inventory file variables for {EDAcontroller}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-gateway-variables.adoc b/downstream/modules/platform/ref-gateway-variables.adoc
index 1a3769f1f3..85e73db7b4 100644
--- a/downstream/modules/platform/ref-gateway-variables.adoc
+++ b/downstream/modules/platform/ref-gateway-variables.adoc
@@ -1,9 +1,11 @@
:_mod-docs-content-type: REFERENCE
[id="platform-gateway-variables"]
-
= {GatewayStart} variables
+[role="_abstract"]
+Inventory file variables for {Gateway}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-general-inventory-variables.adoc b/downstream/modules/platform/ref-general-inventory-variables.adoc
index ef9701840b..92d450e34c 100644
--- a/downstream/modules/platform/ref-general-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-general-inventory-variables.adoc
@@ -4,6 +4,9 @@
= General variables
+[role="_abstract"]
+General inventory file variables for {PlatformNameShort}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-hub-variables.adoc b/downstream/modules/platform/ref-hub-variables.adoc
index 5bde6f9d01..455b936ee9 100644
--- a/downstream/modules/platform/ref-hub-variables.adoc
+++ b/downstream/modules/platform/ref-hub-variables.adoc
@@ -4,6 +4,9 @@
= {HubNameStart} variables
+[role="_abstract"]
+Inventory file variables for {HubName}.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-images-inventory-variables.adoc b/downstream/modules/platform/ref-images-inventory-variables.adoc
index 188b6c8328..0e9e1ffc98 100644
--- a/downstream/modules/platform/ref-images-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-images-inventory-variables.adoc
@@ -4,6 +4,9 @@
= Image variables
+[role="_abstract"]
+Inventory file variables for images.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-receptor-inventory-variables.adoc b/downstream/modules/platform/ref-receptor-inventory-variables.adoc
index f5fbc46901..d8f7a58cf8 100644
--- a/downstream/modules/platform/ref-receptor-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-receptor-inventory-variables.adoc
@@ -4,6 +4,9 @@
= Receptor variables
+[role="_abstract"]
+Inventory file variables for Receptor.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/platform/ref-redis-inventory-variables.adoc b/downstream/modules/platform/ref-redis-inventory-variables.adoc
index 8f13f49822..fd0da1df38 100644
--- a/downstream/modules/platform/ref-redis-inventory-variables.adoc
+++ b/downstream/modules/platform/ref-redis-inventory-variables.adoc
@@ -4,6 +4,9 @@
= Redis variables
+[role="_abstract"]
+Inventory file variables for Redis.
+
[cols="25%,25%,30%,10%,10%",options="header"]
|===
| RPM variable name | Container variable name | Description | Required or optional | Default
diff --git a/downstream/modules/topologies/ref-cont-a-env-a.adoc b/downstream/modules/topologies/ref-cont-a-env-a.adoc
index cc932ea004..c0c1ebc774 100644
--- a/downstream/modules/topologies/ref-cont-a-env-a.adoc
+++ b/downstream/modules/topologies/ref-cont-a-env-a.adoc
@@ -2,6 +2,7 @@
[id="cont-a-env-a"]
= Container {GrowthTopology}
+[role="_abstract"]
include::snippets/growth-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/topologies/ref-cont-b-env-a.adoc b/downstream/modules/topologies/ref-cont-b-env-a.adoc
index 504cfc3377..8a7662a07d 100644
--- a/downstream/modules/topologies/ref-cont-b-env-a.adoc
+++ b/downstream/modules/topologies/ref-cont-b-env-a.adoc
@@ -2,6 +2,7 @@
[id="cont-b-env-a"]
= Container {EnterpriseTopology}
+[role="_abstract"]
include::snippets/enterprise-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/topologies/ref-installation-deployment-models.adoc b/downstream/modules/topologies/ref-installation-deployment-models.adoc
index dd32d55245..e9872d65ac 100644
--- a/downstream/modules/topologies/ref-installation-deployment-models.adoc
+++ b/downstream/modules/topologies/ref-installation-deployment-models.adoc
@@ -3,6 +3,7 @@
= Installation and deployment models
+[role="_abstract"]
The following table outlines the different ways to install or deploy {PlatformNameShort}:
.{PlatformNameShort} installation and deployment models
diff --git a/downstream/modules/topologies/ref-mesh-nodes.adoc b/downstream/modules/topologies/ref-mesh-nodes.adoc
index 0e3b47b64a..c15eb67d78 100644
--- a/downstream/modules/topologies/ref-mesh-nodes.adoc
+++ b/downstream/modules/topologies/ref-mesh-nodes.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: REFERENCE
[id="mesh-nodes"]
= {AutomationMeshStart} nodes
+[role="_abstract"]
{AutomationMeshStart} is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. This is done through nodes that establish peer-to-peer connections with each other by using existing networks.
== Tested system configurations
diff --git a/downstream/modules/topologies/ref-ocp-a-env-a.adoc b/downstream/modules/topologies/ref-ocp-a-env-a.adoc
index 0d32df81ef..e7c9be7115 100644
--- a/downstream/modules/topologies/ref-ocp-a-env-a.adoc
+++ b/downstream/modules/topologies/ref-ocp-a-env-a.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: REFERENCE
[id="ocp-a-env-a"]
= Operator {GrowthTopology}
+[role="_abstract"]
include::snippets/growth-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/topologies/ref-ocp-b-env-a.adoc b/downstream/modules/topologies/ref-ocp-b-env-a.adoc
index 3e44787345..838662180e 100644
--- a/downstream/modules/topologies/ref-ocp-b-env-a.adoc
+++ b/downstream/modules/topologies/ref-ocp-b-env-a.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: REFERENCE
[id="ocp-b-env-a"]
= Operator {EnterpriseTopology}
+[role="_abstract"]
include::snippets/enterprise-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/topologies/ref-rpm-a-env-a.adoc b/downstream/modules/topologies/ref-rpm-a-env-a.adoc
index 41a943dfc5..239c1604c5 100644
--- a/downstream/modules/topologies/ref-rpm-a-env-a.adoc
+++ b/downstream/modules/topologies/ref-rpm-a-env-a.adoc
@@ -2,6 +2,7 @@
[id="rpm-a-env-a"]
= RPM {GrowthTopology}
+[role="_abstract"]
include::snippets/growth-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/topologies/ref-rpm-b-env-a.adoc b/downstream/modules/topologies/ref-rpm-b-env-a.adoc
index f0dfbe22ea..71e4194454 100644
--- a/downstream/modules/topologies/ref-rpm-b-env-a.adoc
+++ b/downstream/modules/topologies/ref-rpm-b-env-a.adoc
@@ -2,6 +2,7 @@
[id="rpm-b-env-a"]
= RPM {EnterpriseTopology}
+[role="_abstract"]
include::snippets/enterprise-topologies.adoc[]
== Infrastructure topology
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc
index a2d21f4a5f..0d84cbe253 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-aap-packages.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: PROCEDURE
[id="troubleshoot-aap-packages"]
= Issue - Cannot locate certain packages that come bundled with the {PlatformNameShort} installer
+[role="_abstract"]
You cannot locate certain packages that come bundled with the {PlatformNameShort} installer, or you are seeing a "Repositories disabled by configuration" message.
-To resolve this issue, enable the repository by using the `subscription-manager` command in the command line. For more information about resolving this issue, see the _Troubleshooting_ section of link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching your {PlatformName} subscription] in _{TitleCentralAuth}_.
\ No newline at end of file
+To resolve this issue, enable the repository by using the `subscription-manager` command in the command line. For more information about resolving this issue, see the _Troubleshooting_ section of link:{URLCentralAuth}/assembly-gateway-licensing#proc-attaching-subscriptions[Attaching your {PlatformName} subscription] in _{TitleCentralAuth}_.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc
index e393e55b46..0243e50aed 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-pending.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: PROCEDURE
[id="troubleshoot-job-pending"]
= Issue - Jobs in {ControllerName} are stuck in a pending state
+[role="_abstract"]
After launching jobs in {ControllerName}, the jobs stay in a pending state and do not start.
There are a few reasons jobs can become stuck in a pending state. For more information about troubleshooting this issue, see link:{URLControllerAdminGuide}/controller-troubleshooting#controller-playbook-pending[Playbook stays in pending] in _{TitleControllerAdminGuide}_
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc
index a851de36b1..93c88a3c1e 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-permissions.adoc
@@ -2,6 +2,7 @@
[id="troubleshoot-job-permissions"]
= Issue - Jobs in {PrivateHubName} are failing with "denied: requested access to the resource is denied, unauthorized: Insufficient permissions" error message
+[role="_abstract"]
Jobs are failing with the error message "denied: requested access to the resource is denied, unauthorized: Insufficient permissions" when using an {ExecEnvShort} in {PrivateHubName}.
This issue happens when your {PrivateHubName} is protected with a password or token and the registry credential is not assigned to the {ExecEnvShort}.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc
index 13f6316387..83776b047e 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-resolve-module.adoc
@@ -3,6 +3,7 @@
[id="troubleshoot-job-resolve-module"]
= Issue - Jobs are failing with “ERROR! couldn’t resolve module/action” error message
+[role="_abstract"]
Jobs are failing with the error message “ERROR! couldn't resolve module/action 'module name'. This often indicates a misspelling, missing collection, or incorrect module path”.
This error can happen when the collection associated with the module is missing from the {ExecEnvShort}.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-timeout.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-timeout.adoc
index e8a6d7a3d9..645d003bea 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-timeout.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-job-timeout.adoc
@@ -3,6 +3,7 @@
[id="troubleshoot-job-timeout"]
= Issue - Jobs are failing with “Timeout (12s) waiting for privilege escalation prompt” error message
+[role="_abstract"]
This error can happen when the timeout value is too small, causing the job to stop before completion. The default timeout value for connection plugins is `10`.
To resolve the issue, increase the timeout value by completing one of the following methods.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc
index 1f6ab25d0d..c6b8842cb4 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-must-gather.adoc
@@ -3,6 +3,7 @@
[id="troubleshoot-must-gather"]
= Troubleshooting {PlatformNameShort} on {OCPShort} by using the must-gather command
+[role="_abstract"]
The `oc adm must-gather` command line interface (CLI) command collects information from your {PlatformNameShort} installation deployed on {OCPShort}. It gathers information that is often needed for debugging issues, including resource definitions and service logs.
Running the `oc adm must-gather` CLI command creates a new directory containing the collected data that you can use to troubleshoot or attach to your support case.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-sosreport.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-sosreport.adoc
index 8c8419e31f..fe085d4b5d 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-sosreport.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-sosreport.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: PROCEDURE
[id="troubleshoot-sosreport"]
= Troubleshooting {PlatformNameShort} on VM-based installations by generating an sos report
+[role="_abstract"]
The `sos` utility collects configuration, diagnostic, and troubleshooting data from your {PlatformNameShort} on a {VMBase}.
For more information about installing and using the `sos` utility, see link:{BaseURL}/red_hat_enterprise_linux/9/html-single/getting_the_most_from_your_support_experience/index#generating-an-sos-report-for-technical-support_getting-the-most-from-your-support-experience[Generating an sos report for technical support].
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-ssl-tls-issues.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-ssl-tls-issues.adoc
index 8d64308bbf..32f19b35cd 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-ssl-tls-issues.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-ssl-tls-issues.adoc
@@ -4,6 +4,7 @@
= Troubleshooting SSL/TLS issues
+[role="_abstract"]
To troubleshoot issues with SSL/TLS, verify the certificate chain, use the correct certificates, and confirm that a trusted Certificate Authority (CA) signed the certificate.
.Procedure
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-subnet-conflict.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-subnet-conflict.adoc
index 9b4ab507fe..3625049276 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-subnet-conflict.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-subnet-conflict.adoc
@@ -1,6 +1,8 @@
+:_mod-docs-content-type: PROCEDURE
[id="troubleshoot-subnet-conflict"]
= Issue - The default subnet used in {PlatformNameShort} containers conflicts with the internal network
+[role="_abstract"]
The default subnet used in {PlatformNameShort} containers conflicts with the internal network resulting in "No route to host" errors.
To resolve this issue, update the default classless inter-domain routing (CIDR) value so it does not conflict with the CIDR used by the default Podman networking plugin.
diff --git a/downstream/modules/troubleshooting-aap/proc-troubleshoot-use-in-controller.adoc b/downstream/modules/troubleshooting-aap/proc-troubleshoot-use-in-controller.adoc
index 744f6829f6..3167584127 100644
--- a/downstream/modules/troubleshooting-aap/proc-troubleshoot-use-in-controller.adoc
+++ b/downstream/modules/troubleshooting-aap/proc-troubleshoot-use-in-controller.adoc
@@ -1,6 +1,9 @@
+:_mod-docs-content-type: PROCEDURE
[id="troubleshoot-use-in-controller"]
+
= Issue - Cannot select the "Use in Controller" option for {ExecEnvShort} image on {PrivateHubName}
+[role="_abstract"]
You cannot use the *Use in Controller* option for an {ExecEnvShort} image on {PrivateHubName}. You also receive the error message: “No Controllers available”.
To resolve this issue, connect {ControllerName} to your {PrivateHubName} instance.
@@ -44,10 +47,6 @@ CONNECTED_ANSIBLE_CONTROLLERS = ['__', '_
Date: Fri, 1 Aug 2025 13:28:29 -0500
Subject: [PATCH 23/71] fixes menu formatting in one module (#3969) (#3971)
---
.../modules/platform/proc-controller-review-organizations.adoc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/downstream/modules/platform/proc-controller-review-organizations.adoc b/downstream/modules/platform/proc-controller-review-organizations.adoc
index 3d023bb6c9..d017dc703a 100644
--- a/downstream/modules/platform/proc-controller-review-organizations.adoc
+++ b/downstream/modules/platform/proc-controller-review-organizations.adoc
@@ -8,7 +8,7 @@ The *Organizations* page displays the existing organizations for your installati
.Procedure
-. From the navigation panel, select menu:{MenuAMOrganizations}.
+. From the navigation panel, select {MenuAMOrganizations}.
. In the Search bar, enter an appropriate keyword for the organization you want to search for and click the arrow icon.
. From the menu bar, you can sort the list of organizations by using the arrows for *Name* to toggle your sorting preference.
. You can also sort the list by selecting *Name*, *Created* or *Last modified* from the *Sort* list.
From bbc7e27dd28becab117283acadd592f2cf64655d Mon Sep 17 00:00:00 2001
From: Jameria Self <73364088+jself-sudoku@users.noreply.github.com>
Date: Mon, 4 Aug 2025 10:24:15 -0400
Subject: [PATCH 24/71] AAP-49612 DITA Pre-migration prep work: EDA user guide
(Using automation decisions) (#3875) (#3974)
* AAP-49612 Updated xref to link in assembly-eda-credential-types.adoc
* AAP-49612 Replaced two xrefs with link tagging in DE section
* AAP-49612 Changed xref to link in rulebook activations assembly
* AAP-49612 Replaced xrefs with links in RHAAP credential for migration prep
* AAP-49612 Replaced xrefs with links in EDA user guide overview - migration prep
* AAP-49612 Replaced xrefs w/ links in Additional resources of overview
* AAP-49612 Migration prep - fix tagging that causes Vale validations
* AAP-49612 Updated add'l resources sections for migration prep
* AAP-49612 Migration prep - updated Add'l resources to links only
* AAP-49612 Migration prep - updated Add'l resources to links only, no period
* Revert "AAP-49612 Migration prep - fix tagging that causes Vale validations"
This reverts commit ef460ca4904e57a96f6ae3f148f31e64d42c6b71.
Changes to be committed:
modified: downstream/archive/archived-assemblies/eda/assembly-ansible-rulebooks.adoc
modified: downstream/archive/archived-assemblies/eda/assembly-installation-eda-controller.adoc
modified: downstream/archive/archived-assemblies/eda/assembly-using-eda-controller.adoc
* AAP-49612 Migration prep - updates to multiple file
* AAP-49612 Tweaked Example block title for Event filter plugins chapter
* AAP-49612 Tweaked Example block title for Event filter plugins chapter
---
.../eda/proc-eda-set-up-token.adoc | 1 +
.../eda/assembly-eda-credential-types.adoc | 3 +-
.../assembly-eda-decision-environments.adoc | 2 +-
.../assembly-eda-event-filter-plugins.adoc | 4 ++-
.../assembly-eda-rulebook-activations.adoc | 2 +-
.../assembly-eda-set-up-rhaap-credential.adoc | 2 +-
.../eda/assembly-eda-user-guide-overview.adoc | 34 +++++++++----------
...on-modifying-simultaneous-activations.adoc | 8 ++---
...a-build-a-custom-decision-environment.adoc | 14 ++++----
.../proc-eda-config-remote-sys-to-events.adoc | 6 ++--
...oc-eda-create-event-stream-credential.adoc | 2 ++
.../eda/proc-eda-create-event-stream.adoc | 11 ++++--
.../eda/proc-eda-delete-controller-token.adoc | 4 ++-
.../eda/proc-eda-set-up-credential-types.adoc | 2 +-
...c-eda-set-up-new-decision-environment.adoc | 4 ++-
.../eda/proc-eda-set-up-new-project.adoc | 4 ++-
.../eda/proc-eda-set-up-rhaap-credential.adoc | 2 ++
.../proc-eda-set-up-rulebook-activation.adoc | 6 +++-
.../eda/proc-eda-view-activation-output.adoc | 4 ++-
.../eda/proc-eda-view-rule-audit-actions.adoc | 4 ++-
.../eda/proc-eda-view-rule-audit-details.adoc | 2 ++
...-modifying-activations-during-install.adoc | 10 +++---
.../modules/eda/ref-eda-logging-samples.adoc | 12 +++----
23 files changed, 86 insertions(+), 57 deletions(-)
diff --git a/downstream/archive/archived-modules/eda/proc-eda-set-up-token.adoc b/downstream/archive/archived-modules/eda/proc-eda-set-up-token.adoc
index 05bb0c6831..bc8c323196 100644
--- a/downstream/archive/archived-modules/eda/proc-eda-set-up-token.adoc
+++ b/downstream/archive/archived-modules/eda/proc-eda-set-up-token.adoc
@@ -28,4 +28,5 @@ The token must be in write-scope.
====
. Select btn:[Create controller token].
+.Results
After saving the new token, you are brought to the *Controller Tokens* tab where you can delete the token.
diff --git a/downstream/assemblies/eda/assembly-eda-credential-types.adoc b/downstream/assemblies/eda/assembly-eda-credential-types.adoc
index 497ef67e28..cff423bab3 100644
--- a/downstream/assemblies/eda/assembly-eda-credential-types.adoc
+++ b/downstream/assemblies/eda/assembly-eda-credential-types.adoc
@@ -7,8 +7,7 @@
These built-in credential types are not editable. So if you want credential types that support authentication with other systems, you can create your own credential types that can be used in your source plugins. Each credential type contains an input configuration and an injector configuration that can be passed to an Ansible rulebook to configure your sources.
-For more information, see xref:eda-custom-credential-types[Custom credential types].
-//[J. Self] Will add the cross-reference/link later.
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credential-types#eda-custom-credential-types[Custom credential types].
include::eda/con-custom-credential-types.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-decision-environments.adoc b/downstream/assemblies/eda/assembly-eda-decision-environments.adoc
index f7677b4c24..8f20b52629 100644
--- a/downstream/assemblies/eda/assembly-eda-decision-environments.adoc
+++ b/downstream/assemblies/eda/assembly-eda-decision-environments.adoc
@@ -6,7 +6,7 @@ Decision environments are container images that run Ansible rulebooks.
They create a common language for communicating automation dependencies, and give a standard way to build and distribute the automation environment.
You can find the default decision environment in the link:https://quay.io/repository/ansible/ansible-rulebook[Ansible-Rulebook].
-To create your own decision environment, see xref:eda-controller-install-builder[Installing ansible-builder] and xref:eda-build-a-custom-decision-environment[Building a custom decision environment for Event-Driven Ansible within Ansible Automation Platform].
+To create your own decision environment, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-decision-environments#eda-controller-install-builder[Installing ansible-builder] and link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-decision-environments#eda-build-a-custom-decision-environment[Building a custom decision environment for Event-Driven Ansible within Ansible Automation Platform].
include::eda/ref-eda-controller-install-builder.adoc[leveloffset=+1]
include::eda/proc-eda-build-a-custom-decision-environment.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc b/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc
index 9d5852e57c..4c21c3bfb6 100644
--- a/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc
+++ b/downstream/assemblies/eda/assembly-eda-event-filter-plugins.adoc
@@ -22,7 +22,8 @@ You can chain event filters one after the other, and the updated data is sent fr
Event filters are defined in the rulebook after a source is defined.
When the rulebook starts the source plugin it associates the correct filters and transforms the data before putting it into the queue.
-.Example
+.Example
+====
----
sources:
@@ -45,5 +46,6 @@ Since every event should record the origin of the event the filter `eda.builtin.
The `received_at` stores a date time in UTC ISO8601 format and includes the microseconds.
The `uuid` stores the unique id for the event.
The `meta key` is used to store metadata about the event and its needed to correctly report about the events in the aap-server.
+====
include::eda/con-eda-author-event-filters.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc b/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
index 706241d65b..9c391db5aa 100644
--- a/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
+++ b/downstream/assemblies/eda/assembly-eda-rulebook-activations.adoc
@@ -25,7 +25,7 @@ The following actions are currently supported:
To view further details, see link:https://ansible.readthedocs.io/projects/rulebook/en/stable/actions.html[Actions].
-A rulebook activation is a process running in the background defined by a decision environment executing a specific rulebook. You can set up your rulebook activation by following xref:eda-set-up-rulebook-activation[Setting up a rulebook activation].
+A rulebook activation is a process running in the background defined by a decision environment executing a specific rulebook. You can set up your rulebook activation by following link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-rulebook-activations#eda-set-up-rulebook-activation[Setting up a rulebook activation].
[WARNING]
====
diff --git a/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc b/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc
index 756b6c18af..fbbde40063 100644
--- a/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc
+++ b/downstream/assemblies/eda/assembly-eda-set-up-rhaap-credential.adoc
@@ -6,7 +6,7 @@ When {EDAcontroller} is deployed on {PlatformNameShort} {PlatformVers}, you can
[NOTE]
====
-If you deployed {EDAcontroller} with {PlatformNameShort} 2.4, you probably used controller tokens to connect {ControllerName} and {EDAcontroller}. These controller tokens have been deprecated in {PlatformNameShort} {PlatformVers}. To delete deprecated controller tokens and the rulebook activations associated with them, complete the following procedures starting with xref:replacing-controller-tokens[Replacing controller tokens in {PlatformNameShort} {PlatformVers}] before proceeding with xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
+If you deployed {EDAcontroller} with {PlatformNameShort} 2.4, you probably used controller tokens to connect {ControllerName} and {EDAcontroller}. These controller tokens have been deprecated in {PlatformNameShort} {PlatformVers}. To delete deprecated controller tokens and the rulebook activations associated with them, complete the following procedures starting with link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type#replacing-controller-tokens[Replacing controller tokens in {PlatformNameShort} {PlatformVers}] before proceeding with link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type#eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
====
include::eda/con-replacing-controller-tokens.adoc[leveloffset=+1]
diff --git a/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc b/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc
index efac82038d..81609cdc68 100644
--- a/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc
+++ b/downstream/assemblies/eda/assembly-eda-user-guide-overview.adoc
@@ -8,18 +8,18 @@ These tools monitor IT solutions and identify events and automatically implement
The following procedures form the user configuration:
-* xref:eda-credentials[Credentials]
-* xref:eda-credential-types[Credential types]
-* xref:eda-projects[Projects]
-* xref:eda-decision-environments[Decision environments]
-* xref:eda-set-up-rhaap-credential-type[Red Hat Ansible Automation Platform credential]
-* xref:eda-rulebook-activations[Rulebook activations]
-* xref:eda-rulebook-troubleshooting[Rulebook activations troubleshooting]
-* xref:eda-rule-audit[Rule audit]
-* xref:simplified-event-routing[Simplified event routing]
-* xref:eda-performance-tuning[Performance tuning for {EDAcontroller}]
-* xref:eda-event-filter-plugins[Event filter plugins]
-* xref:eda-logging-strategy[Event-Driven Ansible logging strategy]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credentials[Credentials]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credential-types[Credential types]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-projects[Projects]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-decision-environments[Decision environments]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type[Red Hat Ansible Automation Platform credential]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-rulebook-activations[Rulebook activations]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-rulebook-troubleshooting[Rulebook activations troubleshooting]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-rule-audit[Rule audit]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/simplified-event-routing[Simplified event routing]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-performance-tuning[Performance tuning for {EDAcontroller}]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-event-filter-plugins[Event filter plugins]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-logging-strategy[Event-Driven Ansible logging strategy]
[NOTE]
@@ -31,9 +31,7 @@ The following procedures form the user configuration:
[role="_additional-resources"]
.Additional resources
-* For information on how to set user permissions for {EDAcontroller}, see the following in the link:{URLCentralAuth}/index[Access management and authentication guide]:
-
-. link:{URLCentralAuth}/gw-managing-access#ref-controller-user-roles[Adding roles for a user]
-. link:{URLCentralAuth}/assembly-gw-roles[Roles]
-
-* If you plan to use {EDAName} 2.5 with a 2.4 {PlatformNameShort}, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/using_event-driven_ansible_2.5_with_ansible_automation_platform_2.4/index[Using Event-Driven Ansible 2.5 with Ansible Automation Platform 2.4].
+* link:{URLCentralAuth}/index[Access management and authentication guide]:
+** link:{URLCentralAuth}/gw-managing-access#ref-controller-user-roles[Adding roles for a user]
+** link:{URLCentralAuth}/assembly-gw-roles[Roles]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/using_event-driven_ansible_2.5_with_ansible_automation_platform_2.4/index[Using Event-Driven Ansible 2.5 with Ansible Automation Platform 2.4]
diff --git a/downstream/modules/eda/con-modifying-simultaneous-activations.adoc b/downstream/modules/eda/con-modifying-simultaneous-activations.adoc
index 5b0f93e2cf..c594e55be5 100644
--- a/downstream/modules/eda/con-modifying-simultaneous-activations.adoc
+++ b/downstream/modules/eda/con-modifying-simultaneous-activations.adoc
@@ -1,8 +1,8 @@
+:_mod-docs-content-type: CONCEPT
[id="modifying-simultaneous-activations"]
= Modifying the number of simultaneous rulebook activations
-[role="_abstract"]
By default, {EDAcontroller} allows 12 rulebook activations per node. For example, with two worker or hybrid nodes, it results in a limit of 24 activations in total to run simultaneously.
If more than 24 rulebook activations are created, the expected behavior is that subsequent rulebook activations wait until there is an available rulebook activation worker.
In this case, the rulebook activation status is displayed as *Pending* even if there is enough free memory and CPU on your {EDAcontroller} instance.
@@ -17,6 +17,6 @@ To change this behavior, you must change the default maximum number of running r
include::proc-modifying-activations-during-install.adoc[leveloffset=+1]
include::proc-modifying-activations-after-install.adoc[leveloffset=+1]
-.Additional Resources
-* For more information about rulebook activations, see the link:https://access.redhat.com/documentation/en-us/red_hat_ansible_automation_platform/2.4/html-single/event-driven_ansible_controller_user_guide/index#eda-rulebook-activations[Rulebook activations].
-* For more information about modifying simultaneous rulebook activations during or after {EDAName} on {OCPShort}, see the example in link:{URLOperatorInstallation}/appendix-operator-crs_appendix-operator-crs#eda_max_running_activations_yml[eda_max_running_activations_yml].
\ No newline at end of file
+.Additional resources
+* link:https://access.redhat.com/documentation/en-us/red_hat_ansible_automation_platform/2.4/html-single/event-driven_ansible_controller_user_guide/index#eda-rulebook-activations[Rulebook activations]
+* link:{URLOperatorInstallation}/appendix-operator-crs_appendix-operator-crs#eda_max_running_activations_yml[eda_max_running_activations_yml]
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc b/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
index 1a2176da98..cedbc89c42 100644
--- a/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
+++ b/downstream/modules/eda/proc-eda-build-a-custom-decision-environment.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-build-a-custom-decision-environment"]
= Building a custom decision environment for {EDAName}
@@ -14,12 +15,6 @@ You can create a custom decision environment for {EDAName} that provides a custo
* {EDAName}
* {Builder} > = 3.0
-.Procedure
-
-* Use `de-minimal` as the base image with {Builder} to build your custom decision environments.
-This image is built from a base image provided by Red Hat at link:https://catalog.redhat.com/software/containers/ansible-automation-platform-25/de-minimal-rhel9/650a5672a370728c710acaab[{PlatformNameShort} minimal decision environment].
-
-+
[IMPORTANT]
====
* Use the correct {EDAcontroller} decision environment in {PlatformNameShort} to prevent rulebook activation failure.
@@ -28,6 +23,13 @@ This image is built from a base image provided by Red Hat at link:https://catalo
** If you want to connect {EDAcontroller} to {PlatformNameShort} {PlatformVers}, you must use `registry.redhat.io/ansible-automation-platform-25/de-minimal-rhel9:latest`
====
+.Procedure
+
+* Use `de-minimal` as the base image with {Builder} to build your custom decision environments.
+This image is built from a base image provided by Red Hat at link:https://catalog.redhat.com/software/containers/ansible-automation-platform-25/de-minimal-rhel9/650a5672a370728c710acaab[{PlatformNameShort} minimal decision environment].
+
+.Example
+
The following is an example of the {Builder} definition file that uses `de-minimal` as a base image to build a custom decision environment with the ansible.eda collection:
-----
version: 3
diff --git a/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc b/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc
index d9820c9936..a8420ead67 100644
--- a/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc
+++ b/downstream/modules/eda/proc-eda-config-remote-sys-to-events.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-config-remote-sys-to-events"]
= Configuring your remote system to send events
@@ -15,7 +16,7 @@ The following example demonstrates how to configure webhooks in a remote system
. Log in to your GitHub repository.
. Click *Your profile name → Your repositories*.
-
++
[NOTE]
====
If you do not have a repository, click *New* to create a new one, select an owner, add a *Repository name*, and click *Create repository*.
@@ -29,4 +30,5 @@ If you do not have a repository, click *New* to create a new one, select an owne
. Enter your *Secret*.
. Click *Add webhook*.
-After the webhook has been added, it attempts to send a test payload to ensure there is connectivity between the two systems (GitHub and {EDAcontroller}). If it can successfully send the data you will see a green check mark next to the *Webhook URL* with the message, *Last delivery was successful*.
\ No newline at end of file
+.Results
+After the webhook has been added, it attempts to send a test payload to ensure there is connectivity between the two systems (GitHub and {EDAcontroller}). If it can successfully send the data, you will see a green check mark next to the *Webhook URL* with the message, *Last delivery was successful*.
\ No newline at end of file
diff --git a/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc b/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc
index 837841d008..97405a078d 100644
--- a/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc
+++ b/downstream/modules/eda/proc-eda-create-event-stream-credential.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-create-event-stream-credential"]
= Creating an event stream credential
@@ -29,4 +30,5 @@ Type Details:: Add the requested information for the credential type you selecte
. Click btn:[Create credential].
+.Results
The Details page is displayed. From there or the *Credentials* list view, you can edit or delete it.
diff --git a/downstream/modules/eda/proc-eda-create-event-stream.adoc b/downstream/modules/eda/proc-eda-create-event-stream.adoc
index 580e5822e6..09882982a3 100644
--- a/downstream/modules/eda/proc-eda-create-event-stream.adoc
+++ b/downstream/modules/eda/proc-eda-create-event-stream.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-create-event-stream"]
= Creating an event stream
@@ -7,7 +8,7 @@ You can create event streams that will be attached to a rulebook activation.
.Prerequisites
* If you will be attaching your event stream to a rulebook activation, ensure that your activation has a decision environment and project already set up.
-* If you plan to connect to {ControllerName} to run your rulebook activation, ensure that you have created a {PlatformName} credential type in addition to the decision environment and project. For more information, see xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
+* If you plan to connect to {ControllerName} to run your rulebook activation, ensure that you have created a {PlatformName} credential type in addition to the decision environment and project. For more information, see link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type#eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
.Procedure
@@ -36,13 +37,17 @@ The event stream's event forwarding can be disabled for testing purposes while d
. Click btn:[Create event stream].
+.Results
After creating your event stream, the following outputs occur:
* The Details page is displayed. From there or the Event Streams list view, you can edit or delete it. Also, the Event Streams page shows all of the event streams you have created and the following columns for each event: *Events received*, *Last event received*, and *Event stream type*. As the first two columns receive external data through the event stream, they are continuously updated to let you know they are receiving events from remote systems.
* If you disabled the event stream, the Details page is displayed with a warning message, *This event stream is disabled*.
-* Your new event stream generates a URL that is necessary when you configure the webhook on the remote system that sends events.
-
++
[NOTE]
====
After an event stream is created, the associated credential cannot be deleted until the event stream it is attached to is deleted.
====
+
+* Your new event stream generates a URL that is necessary when you configure the webhook on the remote system that sends events.
+
+
diff --git a/downstream/modules/eda/proc-eda-delete-controller-token.adoc b/downstream/modules/eda/proc-eda-delete-controller-token.adoc
index eb9d9895b4..bdcc4cf390 100644
--- a/downstream/modules/eda/proc-eda-delete-controller-token.adoc
+++ b/downstream/modules/eda/proc-eda-delete-controller-token.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-delete-controller-token"]
= Deleting controller tokens
@@ -15,4 +16,5 @@ Before you can set up {PlatformName} credentials, you must delete any existing c
. Select the *Tokens* tab.
. Delete all of your previous controller tokens.
-After deleting the controller tokens and rulebook activations, proceed with xref:eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
+.Next steps
+After deleting the controller tokens and rulebook activations, proceed with link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-set-up-rhaap-credential-type#eda-set-up-rhaap-credential[Setting up a {PlatformName} credential].
diff --git a/downstream/modules/eda/proc-eda-set-up-credential-types.adoc b/downstream/modules/eda/proc-eda-set-up-credential-types.adoc
index a5bd0af425..dd2ece9b34 100644
--- a/downstream/modules/eda/proc-eda-set-up-credential-types.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-credential-types.adoc
@@ -97,4 +97,4 @@ Your newly created credential type is displayed in the list of credential types.
.Additional resources
-For information about how to create a new credential, see xref:eda-set-up-credential[Setting up credentials].
+link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credentials#eda-set-up-credential[Setting up credentials].
diff --git a/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc b/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
index 14ae014b49..b291931660 100644
--- a/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-new-decision-environment.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-set-up-new-decision-environment"]
= Setting up a new decision environment
@@ -7,7 +8,7 @@ You can import a decision environment into your {EDAcontroller} using a default
.Prerequisites
* You have set up a credential, if necessary.
-For more information, see the xref:eda-set-up-credential[Setting up credentials] section.
+For more information, see the link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credentials#eda-set-up-credential[Setting up credentials] section.
* You have pushed a decision environment image to an image repository or you chose to use the `de-minimal` image located in link:http://registry.redhat.io/[registry.redhat.io].
.Procedure
@@ -24,6 +25,7 @@ Image:: This is the full image location, including the container registry, image
Credential:: This field is optional. This is the credential needed to use the decision environment image.
. Select btn:[Create decision environment].
+.Results
Your decision environment is now created and can be managed on the *Decision Environments* page.
After saving the new decision environment, the decision environment's details page is displayed.
diff --git a/downstream/modules/eda/proc-eda-set-up-new-project.adoc b/downstream/modules/eda/proc-eda-set-up-new-project.adoc
index 84ccb95c6b..74551bf3e2 100644
--- a/downstream/modules/eda/proc-eda-set-up-new-project.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-new-project.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-set-up-new-project"]
= Setting up a new project
@@ -8,7 +9,7 @@ You can set up projects to manage and store your rulebooks in {EDAcontroller}.
// [ddacosta] I'm not sure whether there will be an EDA specific dashboard in the gateway. Step 1 might need to change to something like "Log in to AAP".
* You are logged in to the {PlatformNameShort} Dashboard as a Content Consumer.
* You have set up a credential, if necessary.
-For more information, see the xref:eda-set-up-credential[Setting up credentials] section.
+For more information, see the link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_automation_decisions/eda-credentials#eda-set-up-credential[Setting up credentials] section.
* You have an existing repository containing rulebooks that are integrated with playbooks contained in a repository to be used by {ControllerName}.
.Procedure
@@ -41,6 +42,7 @@ You can disable this option if you have a local repository that uses self-signed
====
. Select btn:[Create project].
+.Results
Your project is now created and can be managed in the *Projects* page.
After saving the new project, the project's details page is displayed.
diff --git a/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc b/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc
index 6d349bbc42..d999ee0b55 100644
--- a/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc
+++ b/downstream/modules/eda/proc-eda-set-up-rhaap-credential.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-set-up-rhaap-credential"]
= Setting up a {PlatformName} credential
@@ -37,4 +38,5 @@ For {PlatformNameShort} {PlatformVers}, use the following example: \https://
Date: Mon, 4 Aug 2025 15:26:21 +0100
Subject: [PATCH 25/71] DITA migration changes: CAG Ch12 (#3967) (#3976)
* DITA migration changes: CAG Ch12
Configuring automation execution UI and modular compliance chapter 12
https://issues.redhat.com/browse/AAP-46732
* DITA migration changes: CAG Ch12
Correction
Configuring automation execution UI and modular compliance chapter 12
https://issues.redhat.com/browse/AAP-46732
---
.../platform/assembly-metrics-utility.adoc | 27 ++++-
.../con-configuring-the-metrics-utility.adoc | 2 -
.../platform/proc-configure-a-config-map.adoc | 8 +-
.../proc-controller-metrics-utility-rhel.adoc | 42 +++-----
...oc-controller-modify-run-schedule-OCP.adoc | 10 +-
.../platform/proc-deploy-controller.adoc | 2 +-
.../platform/proc-fetch-a-report-on-ocp.adoc | 90 ++++++++++++++++
.../platform/proc-fetch-a-report-on-rhel.adoc | 14 +++
.../proc-modifying-the-run-schedule.adoc | 17 +--
.../proc-object-storaage-with-s3.adoc | 27 +++++
.../proc-object-storage-with-rhel.adoc | 16 +++
downstream/modules/platform/ref-ccsp.adoc | 71 ------------
downstream/modules/platform/ref-ccspv2.adoc | 102 +-----------------
.../ref-fetching-a-monthly-report.adoc | 98 -----------------
.../platform/ref-filter-by-organization.adoc | 15 +++
.../platform/ref-optional-build-sheets.adoc | 41 +++++++
.../platform/ref-optional-collectors.adoc | 21 ++++
.../ref-optional-gather-collectors.adoc | 21 ++++
.../modules/platform/ref-optional-sheets.adoc | 56 ++++++++++
.../ref-select-a-date-range-ccspv2.adoc | 21 ++++
.../ref-select-report-date-range.adoc | 18 ++++
.../platform/ref-storage-invocation.adoc | 1 +
.../platform/ref-supported-storage.adoc | 32 ------
23 files changed, 393 insertions(+), 359 deletions(-)
create mode 100644 downstream/modules/platform/proc-fetch-a-report-on-ocp.adoc
create mode 100644 downstream/modules/platform/proc-fetch-a-report-on-rhel.adoc
create mode 100644 downstream/modules/platform/proc-object-storaage-with-s3.adoc
create mode 100644 downstream/modules/platform/proc-object-storage-with-rhel.adoc
create mode 100644 downstream/modules/platform/ref-filter-by-organization.adoc
create mode 100644 downstream/modules/platform/ref-optional-build-sheets.adoc
create mode 100644 downstream/modules/platform/ref-optional-collectors.adoc
create mode 100644 downstream/modules/platform/ref-optional-gather-collectors.adoc
create mode 100644 downstream/modules/platform/ref-optional-sheets.adoc
create mode 100644 downstream/modules/platform/ref-select-a-date-range-ccspv2.adoc
create mode 100644 downstream/modules/platform/ref-select-report-date-range.adoc
diff --git a/downstream/assemblies/platform/assembly-metrics-utility.adoc b/downstream/assemblies/platform/assembly-metrics-utility.adoc
index a8c1d9cb08..fb8a8ac14b 100644
--- a/downstream/assemblies/platform/assembly-metrics-utility.adoc
+++ b/downstream/assemblies/platform/assembly-metrics-utility.adoc
@@ -1,10 +1,7 @@
-:_newdoc-version: 2.18.3
-:_template-generated: 2024-07-12
+:_mod-docs-content-type: ASSEMBLY
ifdef::context[:parent-context-of-metrics-utility: {context}]
-:_mod-docs-content-type: ASSEMBLY
-
[id="metrics-utility"]
:context: metrics-utility
@@ -48,16 +45,32 @@ include::platform/proc-deploy-controller.adoc[leveloffset=+3]
include::platform/ref-fetching-a-monthly-report.adoc[leveloffset=+1]
+include::platform/proc-fetch-a-report-on-rhel.adoc[leveloffset=+2]
+
+include::platform/proc-fetch-a-report-on-ocp.adoc[leveloffset=+2]
+
include::platform/proc-modifying-the-run-schedule.adoc[leveloffset=+1]
include::platform/proc-controller-modify-run-schedule-OCP.adoc[leveloffset=+2]
include::platform/ref-supported-storage.adoc[leveloffset=+1]
+include::platform/proc-object-storage-with-rhel.adoc[leveloffset=+2]
+
+include::platform/proc-object-storaage-with-s3.adoc[leveloffset=+2]
+
include::platform/ref-report-types.adoc[leveloffset=+1]
include::platform/ref-ccspv2.adoc[leveloffset=+2]
+include::platform/ref-optional-collectors.adoc[leveloffset=+2]
+
+include::platform/ref-optional-sheets.adoc[leveloffset=+2]
+
+include::platform/ref-filter-by-organization.adoc[leveloffset=+2]
+
+include::platform/ref-select-a-date-range-ccspv2.adoc[leveloffset=+2]
+
include::platform/ref-renewal-guidance.adoc[leveloffset=+2]
include::platform/ref-storage-invocation.adoc[leveloffset=+3]
@@ -68,6 +81,12 @@ include::platform/ref-select-a-date-range.adoc[leveloffset=+3]
include::platform/ref-ccsp.adoc[leveloffset=+2]
+include::platform/ref-optional-gather-collectors.adoc[leveloffset=+2]
+
+include::platform/ref-optional-build-sheets.adoc[leveloffset=+2]
+
+include::platform/ref-select-report-date-range.adoc[leveloffset=+2]
+
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/modules/platform/con-configuring-the-metrics-utility.adoc b/downstream/modules/platform/con-configuring-the-metrics-utility.adoc
index 4a1b780c56..a5e9851ca6 100644
--- a/downstream/modules/platform/con-configuring-the-metrics-utility.adoc
+++ b/downstream/modules/platform/con-configuring-the-metrics-utility.adoc
@@ -1,5 +1,3 @@
-:_newdoc-version: 2.18.3
-:_template-generated: 2024-07-15
:_mod-docs-content-type: CONCEPT
[id="configuring-the-metrics-utility"]
diff --git a/downstream/modules/platform/proc-configure-a-config-map.adoc b/downstream/modules/platform/proc-configure-a-config-map.adoc
index add3d5833a..e5a5a6e21b 100644
--- a/downstream/modules/platform/proc-configure-a-config-map.adoc
+++ b/downstream/modules/platform/proc-configure-a-config-map.adoc
@@ -2,7 +2,7 @@
[id="proc-configure-a-config-map"]
-= Create a ConfigMap in the OpenShift UI YAML view
+= Creating a ConfigMap in the OpenShift UI YAML view
To inject the `metrics-utility` cronjobs with configuration data, use the following procedure to create a ConfigMap in the OpenShift UI YAML view:
@@ -12,8 +12,8 @@ To inject the `metrics-utility` cronjobs with configuration data, use the follow
[NOTE]
====
-Metrics-utility runs as indicated by the parameters you set in the configuration file.
-You cannot run the utility cannot manually on {OCPShort}.
+`metrics-utility` runs as indicated by the parameters you set in the configuration file.
+You cannot run the utility manually on {OCPShort}.
====
.Procedure
@@ -46,4 +46,4 @@ data:
.Verification
-* To verify that you created the ConfigMap and the metric utility is installed, select *ConfigMap* from the navigation panel and look for your ConfigMap in the list.
+* To verify that you created the ConfigMap and `metrics-utility` is installed, select *ConfigMap* from the navigation panel and look for your ConfigMap in the list.
diff --git a/downstream/modules/platform/proc-controller-metrics-utility-rhel.adoc b/downstream/modules/platform/proc-controller-metrics-utility-rhel.adoc
index f05e35ce5a..de4eb1931f 100644
--- a/downstream/modules/platform/proc-controller-metrics-utility-rhel.adoc
+++ b/downstream/modules/platform/proc-controller-metrics-utility-rhel.adoc
@@ -9,13 +9,13 @@
* An active {PlatformNameShort} subscription
Metrics-utility is included with {PlatformNameShort}, so you do not need a separate installation.
-The following commands gather the relevant data and generate a link:https://connect.redhat.com/en/programs/certified-cloud-service-provider[CCSP] report containing your usage metrics.
+The following procedure gathers the relevant data and generates a link:https://connect.redhat.com/en/programs/certified-cloud-service-provider[CCSP] report containing your usage metrics.
You can configure these commands as cronjobs to ensure they run at the beginning of every month.
See link:https://www.redhat.com/sysadmin/linux-cron-command[How to schedule jobs using the Linux 'cron' utility] for more on configuring using the cron syntax.
.Procedure
-. Create two scripts in your user's home director in order to set correct variables to ensure that `metrics-utility` gathers all relevant data.
+. Create two scripts in your user's home directory to set correct variables to ensure that `metrics-utility` gathers all relevant data.
.. In `/home/my-user/cron-gather`:
+
[source, ]
@@ -58,42 +58,28 @@ metrics-utility build_report
+
. To ensure that these files are executable, run:
+
-[source, ]
-----
-chmod a+x /home/my-user/cron-gather /home/my-user/cron-report
-----
-+
+`chmod a+x /home/my-user/cron-gather /home/my-user/cron-report`
+
. To open the cron file for editing, run:
+
-[source, ]
-----
-crontab -e
-----
-+
+`crontab -e`
+
. To configure the run schedule, add the following parameters to the end of the file and specify how often you want `metrics-utility` to gather information and build a report using link:https://www.redhat.com/sysadmin/linux-cron-command[cron syntax]. In the following example, the `gather` command is configured to run every hour at 00 minutes. The `build_report` command is configured to run on the second day of each month at 4:00 AM.
+
-[source, ]
-----
-0 */1 * * * /home/my-user/cron-gather
-0 4 2 * * /home/my-user/cron-report
-----
+`0 */1 * * * /home/my-user/cron-gather`
+
+`0 4 2 * * /home/my-user/cron-report`
+
. Save and close the file.
. To verify that you saved your changes, run:
+
-[source, ]
-----
-crontab -l
-----
-+
+`crontab -l`
+
. To ensure that data is being collected, run:
+
-[source, ]
-----
-cat /var/log/cron
-----
+`cat /var/log/cron`
+
-The following is an example of the output. Note that time and date might vary depending on how your configure the run schedule:
+The following is a typical output. Note that time and date might vary depending on how you configure the run schedule:
+
[source, ]
----
@@ -108,4 +94,4 @@ May 8 09:46:26 ip-10-0-6-23 crontab[51659]: (root) END EDIT (root)
----
+
-The generated report will have the default name CCSP--.xlsx and will be deposited in the ship path that you specified in step 1a.
+The generated report has the default name `CCSP--.xlsx` and is saved in the ship path that you specified in step 1a.
diff --git a/downstream/modules/platform/proc-controller-modify-run-schedule-OCP.adoc b/downstream/modules/platform/proc-controller-modify-run-schedule-OCP.adoc
index 82109397e8..3d36d1c411 100644
--- a/downstream/modules/platform/proc-controller-modify-run-schedule-OCP.adoc
+++ b/downstream/modules/platform/proc-controller-modify-run-schedule-OCP.adoc
@@ -4,7 +4,7 @@
= Modifying the run schedule on {OCPShort} from the {PlatformNameShort} operator
-Adjust the execution schedule of the `metrics-utility` within your {PlatformNameShort} deployment running on {OCPShort}.
+To adjust the execution schedule of the `metrics-utility` within your {PlatformNameShort} deployment running on {OCPShort}, use the following procedure:
.Procedure
@@ -18,12 +18,10 @@ Adjust the execution schedule of the `metrics-utility` within your {PlatformName
. On the next screen, select the `YAML` tab.
. In the `YAML` file, find the following parameters and enter a variable representing how often `metrics-utility` should gather data and how often it should produce a report:
+
-[source, ]
-----
-metrics_utility_cronjob_gather_schedule:
-metrics_utility_cronjob_report_schedule:
-----
+`metrics_utility_cronjob_gather_schedule:`
+
+`metrics_utility_cronjob_report_schedule:`
+
. Click btn:[Save].
. From the navigation menu, select menu:Deployments[] and then select *automation-controller-operator-controller-manager*.
. Increase the number of pods to 1.
diff --git a/downstream/modules/platform/proc-deploy-controller.adoc b/downstream/modules/platform/proc-deploy-controller.adoc
index eebf7e6b77..e486f3f872 100644
--- a/downstream/modules/platform/proc-deploy-controller.adoc
+++ b/downstream/modules/platform/proc-deploy-controller.adoc
@@ -4,7 +4,7 @@
= Deploy {ControllerName}
-To deploy {ControllerName} and specify variables for how often metrics-utility gathers usage information and generates a report, use the following procedure:
+To deploy {ControllerName} and specify variables for how often `metrics-utility` gathers usage information and generates a report, use the following procedure:
.Procedure
diff --git a/downstream/modules/platform/proc-fetch-a-report-on-ocp.adoc b/downstream/modules/platform/proc-fetch-a-report-on-ocp.adoc
new file mode 100644
index 0000000000..5030308b1a
--- /dev/null
+++ b/downstream/modules/platform/proc-fetch-a-report-on-ocp.adoc
@@ -0,0 +1,90 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="proc-fetch-a-report-on-ocp"]
+
+= Fetching a monthly report on {OCPShort} from the {PlatformNameShort} Operator
+
+Use the following playbook to fetch a monthly consumption report for {PlatformNameShort} on {OCPShort}:
+
+----
+- name: Copy directory from Kubernetes PVC to local machine
+ hosts: localhost
+
+ vars:
+ report_dir_path: "/mnt/metrics/reports/{{ year }}/{{ month }}/"
+
+ tasks:
+ - name: Create a temporary pod to access PVC data
+ kubernetes.core.k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: temp-pod
+ namespace: "{{ namespace_name }}"
+ spec:
+ containers:
+ - name: busybox
+ image: busybox
+ command: ["/bin/sh"]
+ args: ["-c", "sleep 3600"] # Keeps the container alive for 1 hour
+ volumeMounts:
+ - name: "{{ pvc }}"
+ mountPath: "/mnt/metrics"
+ volumes:
+ - name: "{{ pvc }}"
+ persistentVolumeClaim:
+ claimName: automationcontroller-metrics-utility
+ restartPolicy: Never
+ register: pod_creation
+
+ - name: Wait for both initContainer and main container to be ready
+ kubernetes.core.k8s_info:
+ kind: Pod
+ namespace: "{{ namespace_name }}"
+ name: temp-pod
+ register: pod_status
+ until: >
+ pod_status.resources[0].status.containerStatuses[0].ready
+ retries: 30
+ delay: 10
+
+ - name: Create a tarball of the directory of the report in the container
+ kubernetes.core.k8s_exec:
+ namespace: "{{ namespace_name }}"
+ pod: temp-pod
+ container: busybox
+ command: tar czf /tmp/metrics.tar.gz -C "{{ report_dir_path }}" .
+ register: tarball_creation
+
+ - name: Copy the report tarball from the container to the local machine
+ kubernetes.core.k8s_cp:
+ namespace: "{{ namespace_name }}"
+ pod: temp-pod
+ container: busybox
+ state: from_pod
+ remote_path: /tmp/metrics.tar.gz
+ local_path: "{{ local_dir }}/metrics.tar.gz"
+ when: tarball_creation is succeeded
+
+ - name: Ensure the local directory exists
+ ansible.builtin.file:
+ path: "{{ local_dir }}"
+ state: directory
+
+ - name: Extract the report tarball on the local machine
+ ansible.builtin.unarchive:
+ src: "{{ local_dir }}/metrics.tar.gz"
+ dest: "{{ local_dir }}"
+ remote_src: yes
+ extra_opts: "--strip-components=1"
+ when: tarball_creation is succeeded
+
+ - name: Delete the temporary pod
+ kubernetes.core.k8s:
+ api_version: v1
+ kind: Pod
+ namespace: "{{ namespace_name }}"
+ name: temp-pod
+ state: absent
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/proc-fetch-a-report-on-rhel.adoc b/downstream/modules/platform/proc-fetch-a-report-on-rhel.adoc
new file mode 100644
index 0000000000..547ed97b82
--- /dev/null
+++ b/downstream/modules/platform/proc-fetch-a-report-on-rhel.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="proc-fetch-a-report-on-rhel"]
+
+= Fetching a monthly report on {RHEL}
+
+Use the following procedure to fetch a monthly report on {RHEL}:
+
+.Procedure
+
+* Run:
+`scp -r username@controller_host:$METRICS_UTILITY_SHIP_PATH/data/// /local/directory/`
+
+The system saves the generated report as `CCSP--.xlsx` in the ship path that you specified.
diff --git a/downstream/modules/platform/proc-modifying-the-run-schedule.adoc b/downstream/modules/platform/proc-modifying-the-run-schedule.adoc
index e0935adf64..20618f658d 100644
--- a/downstream/modules/platform/proc-modifying-the-run-schedule.adoc
+++ b/downstream/modules/platform/proc-modifying-the-run-schedule.adoc
@@ -1,5 +1,3 @@
-:_newdoc-version: 2.18.3
-:_template-generated: 2024-07-15
:_mod-docs-content-type: PROCEDURE
[id="modifying-the-run-schedule_{context}"]
@@ -14,17 +12,12 @@ To modify the run schedule on {RHEL} and on {OCPShort}, use one of the following
. From the command line, run:
+
-[source, ]
-----
-crontab -e
-----
-+
+`crontab -e`
+
. After the code editor has opened, update the `gather` and `build` parameters using cron syntax as shown below:
+
-[source, ]
-----
-*/2 * * * * metrics-utility gather_automation_controller_billing_data --ship --until=10m
-*/5 * * * * metrics-utility build_report
-----
+`*/2 * * * * metrics-utility gather_automation_controller_billing_data --ship --until=10m`
+
+`*/5 * * * * metrics-utility build_report`
+
. Save and close the file.
diff --git a/downstream/modules/platform/proc-object-storaage-with-s3.adoc b/downstream/modules/platform/proc-object-storaage-with-s3.adoc
new file mode 100644
index 0000000000..3e9668a08a
--- /dev/null
+++ b/downstream/modules/platform/proc-object-storaage-with-s3.adoc
@@ -0,0 +1,27 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="proc-object-storaage-with-s3"]
+
+= Object storage with S3 interface
+
+To use object storage with S3 interface, for example, with AWS S3, Ceph Object storage, or Minio, you must define environment variables for data gathering and report building commands and cronjobs.
+
+----
+################
+export METRICS_UTILITY_SHIP_TARGET=s3
+# Your path in the object storage
+export METRICS_UTILITY_SHIP_PATH=path_to_data_and_reports/...
+
+################
+# Define S3 config
+export METRICS_UTILITY_BUCKET_NAME=metricsutilitys3
+export METRICS_UTILITY_BUCKET_ENDPOINT="https://s3.us-east-1.amazonaws.com"
+# For AWS S3, define also a region
+export METRICS_UTILITY_BUCKET_REGION="us-east-1"
+
+################
+# Define S3 credentials
+export METRICS_UTILITY_BUCKET_ACCESS_KEY=
+export METRICS_UTILITY_BUCKET_SECRET_KEY=
+----
+
diff --git a/downstream/modules/platform/proc-object-storage-with-rhel.adoc b/downstream/modules/platform/proc-object-storage-with-rhel.adoc
new file mode 100644
index 0000000000..3da12e167c
--- /dev/null
+++ b/downstream/modules/platform/proc-object-storage-with-rhel.adoc
@@ -0,0 +1,16 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="proc-object-storage-with-rhel"]
+
+= Local disk
+
+For an installation of {PlatformNameShort} on {RHEL}, the default storage option is a local disk. For a deployment on {OCPShort}, the default storage is a path inside the attached Persistent Volume Claim.
+
+----
+# Set needed ENV VARs for gathering data and generating reports
+export METRICS_UTILITY_SHIP_TARGET=directory
+# Your path on the local disk
+export METRICS_UTILITY_SHIP_PATH=/path_to_data_and_reports/...
+----
+
+
diff --git a/downstream/modules/platform/ref-ccsp.adoc b/downstream/modules/platform/ref-ccsp.adoc
index be52239410..d18d04bb2b 100644
--- a/downstream/modules/platform/ref-ccsp.adoc
+++ b/downstream/modules/platform/ref-ccsp.adoc
@@ -5,74 +5,3 @@
= CCSP
`CCSP` is the original report format. It does not include many of the customization of CCSPv2, and it is intended to be used only for the CCSP partner program.
-
-== Optional collectors for `gather` command
-
-You can use the following optional collectors for the `gather` command:
-
-* `main_jobhostsummary`
-** If present by default, this incrementally collects the `main_jobhostsummary` table from the {ControllerName} database, containing information about jobs runs and managed nodes automated.
-* `main_host`
-** This collects daily snapshots of the `main_host` table from the {ControllerName} database and has managed nodes/hosts present across {ControllerName} inventories,
-* `main_jobevent`
-** This incrementally collects the `main_jobevent` table from the {ControllerName} database and contains information about which modules, roles, and ansible collections are being used.
-* main_indirectmanagednodeaudit
-** This incrementally collects the `main_indirectmanagednodeaudit` table from the {ControllerName} database and contains information about indirectly managed nodes,
-
-----
-# Example with all optional collectors
-export METRICS_UTILITY_OPTIONAL_COLLECTORS="main_host,main_jobevent,main_indirectmanagednodeaudit"
-----
-
-== Optional sheets for `build_report` command
-
-You may use the following optional sheets for the `build_report` command:
-
-* `ccsp_summary`
-** This is a landing page specifically for partners under the CCSP program. It shows managed node usage by each {ControllerName} organization.
-** This report takes additional parameters to customize the summary page. For more information, see the following example:
-
-----
-export METRICS_UTILITY_PRICE_PER_NODE=11.55 # in USD
-export METRICS_UTILITY_REPORT_SKU=MCT3752MO
-export METRICS_UTILITY_REPORT_SKU_DESCRIPTION="EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
-export METRICS_UTILITY_REPORT_H1_HEADING="CCSP Reporting : ANSIBLE Consumption"
-export METRICS_UTILITY_REPORT_COMPANY_NAME="Company Name"
-export METRICS_UTILITY_REPORT_EMAIL="email@email.com"
-export METRICS_UTILITY_REPORT_RHN_LOGIN="test_login"
-export METRICS_UTILITY_REPORT_COMPANY_BUSINESS_LEADER="BUSINESS LEADER"
-export METRICS_UTILITY_REPORT_COMPANY_PROCUREMENT_LEADER="PROCUREMENT LEADER"
-----
-
-* `managed_nodes`
-** This is a deduplicated list of managed nodes automated by {ControllerName}.
-* `indirectly_managed_nodes`
-** This is a deduplicated list of indirect managed nodes automated by {ControllerName}.
-* `inventory_scope`
-** This is a deduplicated list of managed nodes present across all inventories of {ControllerName}.
-* `usage_by_collections`
-** This is a list of Ansible collections used in {ControllerName} job runs.
-* `usage_by_roles`
-** This is a list of roles used in {ControllerName} job runs.
-*`usage_by_modules`
-** This is a list of modules used in {ControllerName}job runs.
-
-----
-# Example with all optional sheets
-export METRICS_UTILITY_OPTIONAL_CCSP_REPORT_SHEETS='ccsp_summary,managed_nodes,indirectly_managed_nodes,inventory_scope,usage_by_collections,usage_by_roles,usage_by_modules'
-----
-
-== Selecting a date range for your CCSP report
-
-The default behavior of this report is to build a report for the previous month. The following examples describe how to override this default behavior to select a specific date range for your report:
-
-----
-# Builds report for a previous month
-metrics-utility build_report
-
-# Build report for a specific month
-metrics-utility build_report --month=2025-03
-
-# Build report for a specific month overriding an existing report
-metrics-utility build_report --month=2025-03 --force
-----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-ccspv2.adoc b/downstream/modules/platform/ref-ccspv2.adoc
index 8964526159..56de2a7c2d 100644
--- a/downstream/modules/platform/ref-ccspv2.adoc
+++ b/downstream/modules/platform/ref-ccspv2.adoc
@@ -12,104 +12,4 @@ CCSPv2 is a report which shows the following:
The primary use of this report is for partners under the link:https://connect.redhat.com/en/programs/certified-cloud-service-provider[CCSP] program, but all customers can use it to obtain on-premise reporting showing managed nodes, jobs and content usage across their {ControllerName} organizations.
-Set the report type using `METRICS_UTILITY_REPORT_TYPE=CCSPv2`.
-
-== Optional collectors for `gather` command
-
-You can use the following optional collectors for the `gather` command:
-
-* `main_jobhostsummary`
-** If present by default, this incrementally collects data from the `main_jobhostsummary` table in the {ControllerName} database, containing information about jobs runs and managed nodes automated.
-* `main_host`
-** This collects daily snapshots of the `main_host` table in the {ControllerName} database and has managed nodes and hosts present across {ControllerName} inventories.
-* `main_jobevent`
-** This incrementally collects data from the `main_jobevent` table in the {ControllerName} database and contains information about which modules, roles, and Ansible collections are being used.
-* `main_indirectmanagednodeaudit`
-** This incrementally collects data from the `main_indirectmanagednodeaudit` table in the {ControllerName} database and contains information about indirectly managed nodes.
-
-----
-# Example with all optional collectors
-export METRICS_UTILITY_OPTIONAL_COLLECTORS="main_host,main_jobevent,main_indirectmanagednodeaudit"
-----
-
-== Optional sheets for `build_report` command
-
-You can use the following optional sheets for the `build_report` command:
-
-* `ccsp_summary`
-** This is a landing page specifically for partners under CCSP program.
-This report takes additional parameters to customize the summary page. For more information, see the following example:
-+
-----
-export METRICS_UTILITY_PRICE_PER_NODE=11.55 # in USD
-export METRICS_UTILITY_REPORT_SKU=MCT3752MO
-export METRICS_UTILITY_REPORT_SKU_DESCRIPTION="EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
-export METRICS_UTILITY_REPORT_H1_HEADING="CCSP NA Direct Reporting Template"
-export METRICS_UTILITY_REPORT_COMPANY_NAME="Partner A"
-export METRICS_UTILITY_REPORT_EMAIL="email@email.com"
-export METRICS_UTILITY_REPORT_RHN_LOGIN="test_login"
-export METRICS_UTILITY_REPORT_PO_NUMBER="123"
-export METRICS_UTILITY_REPORT_END_USER_COMPANY_NAME="Customer A"
-export METRICS_UTILITY_REPORT_END_USER_CITY="Springfield"
-export METRICS_UTILITY_REPORT_END_USER_STATE="TX"
-export METRICS_UTILITY_REPORT_END_USER_COUNTRY="US"
-----
-* `jobs`
-** This is a list of {ControllerName} jobs launched. It is grouped by job template.
-* `managed_nodes`
-** This is a deduplicated list of managed nodes automated by {ControllerName}.
-* `indirectly_managed_nodes`
-** This is a deduplicated list of indirect managed nodes automated by {ControllerName}.
-* `inventory_scope`
-** This is a deduplicated list of managed nodes present across all inventories of {ControllerName}.
-* `usage_by_organizations`
-** This is a list of all {ControllerName} organizations with several metrics showing the organizations usage. This provides data suitable for doing internal chargeback.
-* `usage_by_collections`
-** This is a list of Ansible collections used in a {ControllerName} job runs.
-* `usage_by_roles`
-** This is a list of roles used in {ControllerName} job runs.
-* `usage_by_modules`
-** This is a list of modules used in {ControllerName} job runs.
-* `managed_nodes_by_organization`
-** This generates a sheet per organization, listing managed nodes for every organization with the same content as the managed_nodes sheet.
-* `data_collection_status`
-** This generates a sheet with the status of every data collection done by the `gather` command for the date range the report is built for.
-
-To outline the quality of data collected it also lists:
-
-*** unusual gaps between collections (based on collection_start_timestamp)
-*** gaps in collected intervals (based on since vs until)
-+
-----
-# Example with all optional sheets
-export METRICS_UTILITY_OPTIONAL_CCSP_REPORT_SHEETS='ccsp_summary,jobs,managed_nodes,indirectly_managed_nodes,inventory_scope,usage_by_organizations,usage_by_collections,usage_by_roles,usage_by_modules,data_collection_status'
-----
-
-== Filtering reports by organization
-To filter your report so that only certain organizations are present, use this environment variable with a semicolon separated list of organization names.
-
-`export METRICS_UTILITY_ORGANIZATION_FILTER="ACME;Organization 1"`
-
-This renders only the data from these organizations in the built report. This filter currently does not have any effect on the following optional sheets:
-
-* `usage_by_collections`
-* `usage_by_roles`
-* `usage_by_modules`
-
-== Selecting a date range for your CCSPv2 report
-
-The default behavior of the CCSPv2 report is to build a report for the previous month. The following examples describe how to override this default behavior to select a specific date range for your report:
-
-----
-# Build report for a specific month
-metrics-utility build_report --month=2025-03
-
-# Build report for a specific date range, icluding the prvided days
-metrics-utility build_report --since=2025-03-01 --until=2025-03-31
-
-# Build report for a last 6 months from a current date
-metrics-utility build_report --since=6months
-
-# Build report for a last 6 months from a current date overriding an exisitng report
-metrics-utility build_report --since=6months --force
-----
\ No newline at end of file
+Set the report type using `METRICS_UTILITY_REPORT_TYPE=CCSPv2`.
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-fetching-a-monthly-report.adoc b/downstream/modules/platform/ref-fetching-a-monthly-report.adoc
index cc61c1e00f..599f70ea24 100644
--- a/downstream/modules/platform/ref-fetching-a-monthly-report.adoc
+++ b/downstream/modules/platform/ref-fetching-a-monthly-report.adoc
@@ -6,101 +6,3 @@
Fetch a monthly report from {PlatformNameShort} to gather usage metrics and create a consumption-based billing report. To fetch a monthly report on {RHEL} or on {OCPShort}, use the following procedures:
-== Fetching a monthly report on {RHEL}
-
-Use the following procedure to fetch a monthly report on {RHEL}:
-
-.Procedure
-
-. Run:
-`scp -r username@controller_host:$METRICS_UTILITY_SHIP_PATH/data/// /local/directory/`
-
-The system savess the generated report as `CCSP--.xlsx` in the ship path that you specified.
-
-== Fetching a monthly report on {OCPShort} from the {PlatformNameShort} Operator
-
-Use the following playbook to fetch a monthly consumption report for {PlatformNameShort} on {OCPShort}:
-
-----
-- name: Copy directory from Kubernetes PVC to local machine
- hosts: localhost
-
- vars:
- report_dir_path: "/mnt/metrics/reports/{{ year }}/{{ month }}/"
-
- tasks:
- - name: Create a temporary pod to access PVC data
- kubernetes.core.k8s:
- definition:
- apiVersion: v1
- kind: Pod
- metadata:
- name: temp-pod
- namespace: "{{ namespace_name }}"
- spec:
- containers:
- - name: busybox
- image: busybox
- command: ["/bin/sh"]
- args: ["-c", "sleep 3600"] # Keeps the container alive for 1 hour
- volumeMounts:
- - name: "{{ pvc }}"
- mountPath: "/mnt/metrics"
- volumes:
- - name: "{{ pvc }}"
- persistentVolumeClaim:
- claimName: automationcontroller-metrics-utility
- restartPolicy: Never
- register: pod_creation
-
- - name: Wait for both initContainer and main container to be ready
- kubernetes.core.k8s_info:
- kind: Pod
- namespace: "{{ namespace_name }}"
- name: temp-pod
- register: pod_status
- until: >
- pod_status.resources[0].status.containerStatuses[0].ready
- retries: 30
- delay: 10
-
- - name: Create a tarball of the directory of the report in the container
- kubernetes.core.k8s_exec:
- namespace: "{{ namespace_name }}"
- pod: temp-pod
- container: busybox
- command: tar czf /tmp/metrics.tar.gz -C "{{ report_dir_path }}" .
- register: tarball_creation
-
- - name: Copy the report tarball from the container to the local machine
- kubernetes.core.k8s_cp:
- namespace: "{{ namespace_name }}"
- pod: temp-pod
- container: busybox
- state: from_pod
- remote_path: /tmp/metrics.tar.gz
- local_path: "{{ local_dir }}/metrics.tar.gz"
- when: tarball_creation is succeeded
-
- - name: Ensure the local directory exists
- ansible.builtin.file:
- path: "{{ local_dir }}"
- state: directory
-
- - name: Extract the report tarball on the local machine
- ansible.builtin.unarchive:
- src: "{{ local_dir }}/metrics.tar.gz"
- dest: "{{ local_dir }}"
- remote_src: yes
- extra_opts: "--strip-components=1"
- when: tarball_creation is succeeded
-
- - name: Delete the temporary pod
- kubernetes.core.k8s:
- api_version: v1
- kind: Pod
- namespace: "{{ namespace_name }}"
- name: temp-pod
- state: absent
-----
-
diff --git a/downstream/modules/platform/ref-filter-by-organization.adoc b/downstream/modules/platform/ref-filter-by-organization.adoc
new file mode 100644
index 0000000000..f17411cf48
--- /dev/null
+++ b/downstream/modules/platform/ref-filter-by-organization.adoc
@@ -0,0 +1,15 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-filter-by-organization"]
+
+= Filtering reports by organization
+
+To filter your report so that only certain organizations are present, use this environment variable with a semicolon separated list of organization names.
+
+`export METRICS_UTILITY_ORGANIZATION_FILTER="ACME;Organization 1"`
+
+This renders only the data from these organizations in the built report. This filter currently does not have any effect on the following optional sheets:
+
+* `usage_by_collections`
+* `usage_by_roles`
+* `usage_by_modules`
diff --git a/downstream/modules/platform/ref-optional-build-sheets.adoc b/downstream/modules/platform/ref-optional-build-sheets.adoc
new file mode 100644
index 0000000000..180254ff1b
--- /dev/null
+++ b/downstream/modules/platform/ref-optional-build-sheets.adoc
@@ -0,0 +1,41 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-optional-build-sheets"]
+
+= Optional sheets for `build_report` command
+
+You can use the following optional sheets for the `build_report` command:
+
+* `ccsp_summary`
+** This is a landing page specifically for partners under the CCSP program. It shows managed node usage by each {ControllerName} organization.
+** This report takes additional parameters to customize the summary page. For more information, see the following example:
++
+----
+export METRICS_UTILITY_PRICE_PER_NODE=11.55 # in USD
+export METRICS_UTILITY_REPORT_SKU=MCT3752MO
+export METRICS_UTILITY_REPORT_SKU_DESCRIPTION="EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
+export METRICS_UTILITY_REPORT_H1_HEADING="CCSP Reporting : ANSIBLE Consumption"
+export METRICS_UTILITY_REPORT_COMPANY_NAME="Company Name"
+export METRICS_UTILITY_REPORT_EMAIL="email@email.com"
+export METRICS_UTILITY_REPORT_RHN_LOGIN="test_login"
+export METRICS_UTILITY_REPORT_COMPANY_BUSINESS_LEADER="BUSINESS LEADER"
+export METRICS_UTILITY_REPORT_COMPANY_PROCUREMENT_LEADER="PROCUREMENT LEADER"
+----
+
+* `managed_nodes`
+** This is a deduplicated list of managed nodes automated by {ControllerName}.
+* `indirectly_managed_nodes`
+** This is a deduplicated list of indirect managed nodes automated by {ControllerName}.
+* `inventory_scope`
+** This is a deduplicated list of managed nodes present across all inventories of {ControllerName}.
+* `usage_by_collections`
+** This is a list of Ansible collections used in {ControllerName} job runs.
+* `usage_by_roles`
+** This is a list of roles used in {ControllerName} job runs.
+* `usage_by_modules`
+** This is a list of modules used in {ControllerName} job runs.
+
+----
+# Example with all optional sheets
+export METRICS_UTILITY_OPTIONAL_CCSP_REPORT_SHEETS='ccsp_summary,managed_nodes,indirectly_managed_nodes,inventory_scope,usage_by_collections,usage_by_roles,usage_by_modules'
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-optional-collectors.adoc b/downstream/modules/platform/ref-optional-collectors.adoc
new file mode 100644
index 0000000000..81f2a5a284
--- /dev/null
+++ b/downstream/modules/platform/ref-optional-collectors.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-optional-collectors"]
+
+= Optional collectors for `gather` command
+
+You can use the following optional collectors for the `gather` command:
+
+* `main_jobhostsummary`
+** If present by default, this incrementally collects data from the `main_jobhostsummary` table in the {ControllerName} database, containing information about jobs runs and managed nodes automated.
+* `main_host`
+** This collects daily snapshots of the `main_host` table in the {ControllerName} database and has managed nodes and hosts present across {ControllerName} inventories.
+* `main_jobevent`
+** This incrementally collects data from the `main_jobevent` table in the {ControllerName} database and contains information about which modules, roles, and Ansible collections are being used.
+* `main_indirectmanagednodeaudit`
+** This incrementally collects data from the `main_indirectmanagednodeaudit` table in the {ControllerName} database and contains information about indirectly managed nodes.
++
+----
+# Example with all optional collectors
+export METRICS_UTILITY_OPTIONAL_COLLECTORS="main_host,main_jobevent,main_indirectmanagednodeaudit"
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-optional-gather-collectors.adoc b/downstream/modules/platform/ref-optional-gather-collectors.adoc
new file mode 100644
index 0000000000..65898a2209
--- /dev/null
+++ b/downstream/modules/platform/ref-optional-gather-collectors.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-optional-gather-collectors"]
+
+= Optional collectors for `gather` command
+
+You can use the following optional collectors for the `gather` command:
+
+* `main_jobhostsummary`
+** If present by default, this incrementally collects the `main_jobhostsummary` table from the {ControllerName} database, containing information about jobs runs and managed nodes automated.
+* `main_host`
+** This collects daily snapshots of the `main_host` table from the {ControllerName} database and has managed nodes/hosts present across {ControllerName} inventories.
+* `main_jobevent`
+** This incrementally collects the `main_jobevent` table from the {ControllerName} database and contains information about which modules, roles, and Ansible collections are being used.
+* `main_indirectmanagednodeaudit`
+** This incrementally collects the `main_indirectmanagednodeaudit` table from the {ControllerName} database and contains information about indirectly managed nodes.
+
+----
+# Example with all optional collectors
+export METRICS_UTILITY_OPTIONAL_COLLECTORS="main_host,main_jobevent,main_indirectmanagednodeaudit"
+----
diff --git a/downstream/modules/platform/ref-optional-sheets.adoc b/downstream/modules/platform/ref-optional-sheets.adoc
new file mode 100644
index 0000000000..47641fcb68
--- /dev/null
+++ b/downstream/modules/platform/ref-optional-sheets.adoc
@@ -0,0 +1,56 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-optional-sheets"]
+
+= Optional sheets for `build_report` command
+
+You can use the following optional sheets for the `build_report` command:
+
+* `ccsp_summary`
+** This is a landing page specifically for partners under the CCSP program.
+This report takes additional parameters to customize the summary page. For more information, see the following example:
++
+----
+export METRICS_UTILITY_PRICE_PER_NODE=11.55 # in USD
+export METRICS_UTILITY_REPORT_SKU=MCT3752MO
+export METRICS_UTILITY_REPORT_SKU_DESCRIPTION="EX: Red Hat Ansible Automation Platform, Full Support (1 Managed Node, Dedicated, Monthly)"
+export METRICS_UTILITY_REPORT_H1_HEADING="CCSP NA Direct Reporting Template"
+export METRICS_UTILITY_REPORT_COMPANY_NAME="Partner A"
+export METRICS_UTILITY_REPORT_EMAIL="email@email.com"
+export METRICS_UTILITY_REPORT_RHN_LOGIN="test_login"
+export METRICS_UTILITY_REPORT_PO_NUMBER="123"
+export METRICS_UTILITY_REPORT_END_USER_COMPANY_NAME="Customer A"
+export METRICS_UTILITY_REPORT_END_USER_CITY="Springfield"
+export METRICS_UTILITY_REPORT_END_USER_STATE="TX"
+export METRICS_UTILITY_REPORT_END_USER_COUNTRY="US"
+----
+* `jobs`
+** This is a list of {ControllerName} jobs launched. It is grouped by job template.
+* `managed_nodes`
+** This is a deduplicated list of managed nodes automated by {ControllerName}.
+* `indirectly_managed_nodes`
+** This is a deduplicated list of indirect managed nodes automated by {ControllerName}.
+* `inventory_scope`
+** This is a deduplicated list of managed nodes present across all inventories of {ControllerName}.
+* `usage_by_organizations`
+** This is a list of all {ControllerName} organizations with several metrics showing the organizations usage. This provides data suitable for doing internal chargeback.
+* `usage_by_collections`
+** This is a list of Ansible collections used in {ControllerName} job runs.
+* `usage_by_roles`
+** This is a list of roles used in {ControllerName} job runs.
+* `usage_by_modules`
+** This is a list of modules used in {ControllerName} job runs.
+* `managed_nodes_by_organization`
+** This generates a sheet per organization, listing managed nodes for every organization with the same content as the managed_nodes sheet.
+* `data_collection_status`
+** This generates a sheet with the status of every data collection done by the `gather` command for the date range the report is built for.
+
+To outline the quality of data collected it also lists:
+
+*** unusual gaps between collections (based on collection_start_timestamp)
+*** gaps in collected intervals (based on since vs until)
++
+----
+# Example with all optional sheets
+export METRICS_UTILITY_OPTIONAL_CCSP_REPORT_SHEETS='ccsp_summary,jobs,managed_nodes,indirectly_managed_nodes,inventory_scope,usage_by_organizations,usage_by_collections,usage_by_roles,usage_by_modules,data_collection_status'
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-select-a-date-range-ccspv2.adoc b/downstream/modules/platform/ref-select-a-date-range-ccspv2.adoc
new file mode 100644
index 0000000000..c2c6f9f542
--- /dev/null
+++ b/downstream/modules/platform/ref-select-a-date-range-ccspv2.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-select-a-date-range-ccspv2"]
+
+= Selecting a date range for your CCSPv2 report
+
+The default behavior of the CCSPv2 report is to build a report for the previous month. The following examples describe how to override this default behavior to select a specific date range for your report:
+
+----
+# Build report for a specific month
+metrics-utility build_report --month=2025-03
+
+# Build report for a specific date range, including the provided days
+metrics-utility build_report --since=2025-03-01 --until=2025-03-31
+
+# Build report for the last 6 months from the current date
+metrics-utility build_report --since=6months
+
+# Build report for the last 6 months from the current date, overriding an existing report
+metrics-utility build_report --since=6months --force
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-select-report-date-range.adoc b/downstream/modules/platform/ref-select-report-date-range.adoc
new file mode 100644
index 0000000000..c987298e2a
--- /dev/null
+++ b/downstream/modules/platform/ref-select-report-date-range.adoc
@@ -0,0 +1,18 @@
+:_mod-docs-content-type: REFERENCE
+
+[id="ref-select-report-date-range"]
+
+= Selecting a date range for your CCSP report
+
+The default behavior of this report is to build a report for the previous month. The following examples describe how to override this default behavior to select a specific date range for your report:
+
+----
+# Builds report for a previous month
+metrics-utility build_report
+
+# Build report for a specific month
+metrics-utility build_report --month=2025-03
+
+# Build report for a specific month overriding an existing report
+metrics-utility build_report --month=2025-03 --force
+----
\ No newline at end of file
diff --git a/downstream/modules/platform/ref-storage-invocation.adoc b/downstream/modules/platform/ref-storage-invocation.adoc
index 3387ec0857..f8309b338e 100644
--- a/downstream/modules/platform/ref-storage-invocation.adoc
+++ b/downstream/modules/platform/ref-storage-invocation.adoc
@@ -3,6 +3,7 @@
[id="ref-storage-invocation"]
= Storage and invocation
+
The `RENEWAL_GUIDANCE` report supports the use of only local disk storage to store the report results. This report does not have a gather data step. It reads directly from the controller HostMetric table, so it does not store any raw data under the `METRICS_UTILITY_SHIP_PATH`.
----
diff --git a/downstream/modules/platform/ref-supported-storage.adoc b/downstream/modules/platform/ref-supported-storage.adoc
index 04012db9f8..35f23c0f13 100644
--- a/downstream/modules/platform/ref-supported-storage.adoc
+++ b/downstream/modules/platform/ref-supported-storage.adoc
@@ -6,35 +6,3 @@
Supported storage is available for storing the raw data obtained by using the `metrics-utility gather_automation_controller_billing_data` command and storing the generated reports obtained by using the `metrics-utility build_report` command.
Apply the environment variables to this storage based on your {PlatformNameShort} installation.
-
-== Local disk
-For an installation of {PlatformNameShort} on {RHEL}, the default storage option is a local disk. Using an OpenShift deployment of {OCPShort}, default storage is a path inside the attached Persistent Volume Claim.
-
-----
-# Set needed ENV VARs for gathering data and generating reports
-export METRICS_UTILITY_SHIP_TARGET=directory
-# Your path on the local disk
-export METRICS_UTILITY_SHIP_PATH=/path_to_data_and_reports/...
-----
-
-== Object storage with S3 interface
-
-To use object storage with S3 interface, for example, with AWS S3, Ceph Object storage, or Minio, you must define environment variables for data gathering and report building commands and cronjobs.
-----
-################
-export METRICS_UTILITY_SHIP_TARGET=s3
-# Your path in the object storage
-export METRICS_UTILITY_SHIP_PATH=path_to_data_and_reports/...
-
-################
-# Define S3 config
-export METRICS_UTILITY_BUCKET_NAME=metricsutilitys3
-export METRICS_UTILITY_BUCKET_ENDPOINT="https://s3.us-east-1.amazonaws.com"
-# For AWS S3, define also a region
-export METRICS_UTILITY_BUCKET_REGION="us-east-1"
-
-################
-# Define S3 credentials
-export METRICS_UTILITY_BUCKET_ACCESS_KEY=
-export METRICS_UTILITY_BUCKET_SECRET_KEY=
-----
\ No newline at end of file
From 4540e63d56c1f8654e56b4f81fb7681915eb798e Mon Sep 17 00:00:00 2001
From: Robert Grange <95885266+rogrange@users.noreply.github.com>
Date: Mon, 4 Aug 2025 13:28:58 -0400
Subject: [PATCH 26/71] Release notes for async 2.5-20250730 (#3977) (#3978)
Resolves:AAP-49775
---
.../release-notes/async/aap-25-20250730.adoc | 125 ++++++++++++++++++
downstream/titles/release-notes/master.adoc | 2 +
.../release-notes/topics/tech-preview.adoc | 12 ++
3 files changed, 139 insertions(+)
create mode 100644 downstream/titles/release-notes/async/aap-25-20250730.adoc
diff --git a/downstream/titles/release-notes/async/aap-25-20250730.adoc b/downstream/titles/release-notes/async/aap-25-20250730.adoc
new file mode 100644
index 0000000000..160c0e8993
--- /dev/null
+++ b/downstream/titles/release-notes/async/aap-25-20250730.adoc
@@ -0,0 +1,125 @@
+[[aap-25-20250730]]
+
+= {PlatformNameShort} patch release July 30, 2025
+
+This release includes the following components and versions:
+
+[cols="1a,3a", options="header"]
+|===
+| Release date | Component versions
+
+| July 30, 2025|
+* {ControllerNameStart} 4.6.18
+* {HubNameStart} 4.10.6
+* {EDAName} 1.1.11
+* Container-based installer {PlatformNameShort} (bundle) 2.5-17
+* Container-based installer {PlatformNameShort} (online) 2.5-17
+* Receptor 1.5.7
+* RPM-based installer {PlatformNameShort} (bundle) 2.5-16
+* RPM-based installer {PlatformNameShort} (online) 2.5-16
+
+|===
+
+CSV Versions in this release:
+
+* Namespace-scoped Bundle: aap-operator.v2.5.0-0.1753402603
+
+* Cluster-scoped Bundle: aap-operator.v2.5.0-0.1753403065
+
+
+
+== General
+
+* The `redhat.rhel_system_roles` collection has been updated to 1.95.7.(AAP-49916)
+
+* The `ansible.windows` collection has been updated to 2.8.0.(AAP-49923)
+
+* The `ansible.eda` collection has been updated to 2.8.2.(AAP-49997)
+
+
+
+== CVE
+
+With this update, the following CVEs have been addressed:
+
+* link:https://access.redhat.com/security/cve/CVE-2025-7738[CVE-2025-7738] `python3.11-django-ansible-base`: Hide plain text *OAuth2* secrets on GitHub Enterprise and GitHub Enterprise organization authenticator configuration views in platform-gateway.(AAP-49561)
+
+* link:https://access.redhat.com/security/cve/CVE-2025-2099[CVE-2025-2099] `ansible-automation-platform-25/lightspeed-chatbot-rhel8`: Regular Expression Denial of Service (ReDoS) in huggingface/transformers.(AAP-48621)
+
+* link:https://access.redhat.com/security/cve/CVE-2025-5988[CVE-2025-5988] `automation-gateway`: CSRF origin checking is disabled.(AAP-50374)
+
+
+
+== {PlatformNameShort}
+
+=== Features
+
+* `PosixUIDGroupType` can be selected for LDAP Group Type.(AAP-49347)
+
+=== Enhancements
+
+* Optimized the handling of web socket messages from the Workflow Visualizer.(AAP-46800)
+
+=== Bug fixes
+
+* Fixed the fields `content_type` for role user assignments to indicate that null values are valid responses from the API.(AAP-49494)
+
+* Fixed the fields `team_ansible_id` for role team assignments to indicate that null values can be POSTed to the API.(AAP-49812)
+
+* Fixed an issue where `auto-complete` was not disabled on all forms for sensitive information such as usernames, passwords, secret keys, etc.(AAP-49079)
+
+* Fixed an issue related to workflow job template limits overriding workflow job template node limits upon save.(AAP-48946)
+
+* Fixed the *Min* and *Max* Limit values displayed on the *Edit Survey* form.(AAP-39933)
+
+* Fixed an issue where the case insensitivity for authentication map user attribute names and values and for group names was not available. Feature flag `FEATURE_CASE_INSENSITIVE_AUTH_MAPS` must be set to true to enable case insensitive comparisons.(AAP-49327)
+
+* Fixed an issue that adds an OIDC Callback URL field that, after creation of authenticator, displays the URL to use in setting up the IdP. The URL field is displayed on the creation page and this field is to be left blank.(AAP-49874)
+
+
+
+== {ControllerNameStart}
+
+=== Enhancements
+
+* Update the injectors for the {PlatformNameShort} credential type to work across collection.(AAP-47877)
+
+=== Bug Fixes
+
+* Removed API version from hard-coded URL in inventory plugin.(AAP-48443)
+
+* Fixed a *404* error for workflow nodes.(AAP-47362)
+
+* Fixed an issue where the {ControllerName} pod was not working after an upgrade to `aap-operator.v2.5.0-0.1750901870`.(AAP-48771)
+
+
+
+== Container-based {PlatformNameShort}
+
+=== Enhancements
+
+* Added an exclusion parameter for Container-based {PlatformNameShort} Backup, allowing users to specify snapshot paths to be excluded from the backup process.(AAP-50114)
+
+=== Bug Fixes
+
+* Fixed the issue where execution instances removed from the inventory would still be visible on the Topology View.(AAP-48615)
+
+* Fixed a bug when restoring {HubName} to a new cluster when using NFS for the hub data filesystem.(AAP-48568)
+
+* Fixed permission issues when restoring {HubName} when using NFS storage.(AAP-50118)
+
+
+
+== RPM-based {PlatformNameShort}
+
+=== Bug Fixes
+
+* {EDAName} node type is now properly checked during restore.(AAP-49004)
+
+* Fixed an issue where *gRPC* server port was not configured properly when non-default value was used.(AAP-48543)
+
+* Fixed an issue where the firewall role logic improperly restricted {EDAName} event stream ports. Firewall ports are now restricted to event hosts, enhancing network security for {EDAName} users.(AAP-49792)
+
+* Fixed an issue where the gunicorn timeout to {EDAName} API service unit was not passed.(AAP-49858)
+
+* Fixed an issue where envoy, nginx, web server, and jwt token timeouts were not aligned, and caused issues where requests time out but work continues, or tokens expire before they are used.(AAP-49153)
diff --git a/downstream/titles/release-notes/master.adoc b/downstream/titles/release-notes/master.adoc
index 579b9b8054..15b4de17ed 100644
--- a/downstream/titles/release-notes/master.adoc
+++ b/downstream/titles/release-notes/master.adoc
@@ -34,6 +34,8 @@ include::topics/docs-25.adoc[leveloffset=+1]
// == Asynchronous updates
include::async/async-updates.adoc[leveloffset=+1]
+// Async release 2.5-07-30-2025
+include::async/aap-25-20250730.adoc[leveloffset=+2]
// Async release 2.5-07-02-2025
include::async/aap-25-20250702.adoc[leveloffset=+2]
// Async release 2.5-06-11-2025
diff --git a/downstream/titles/release-notes/topics/tech-preview.adoc b/downstream/titles/release-notes/topics/tech-preview.adoc
index 818c9c6f81..3a748bde54 100644
--- a/downstream/titles/release-notes/topics/tech-preview.adoc
+++ b/downstream/titles/release-notes/topics/tech-preview.adoc
@@ -20,6 +20,18 @@ include::../snippets/technology-preview.adoc[]
// You can now configure the Controller Access Token for each resource with the `connection_secret` parameter, rather than the `tower_auth_secret` parameter. This change is compatible with earlier versions, but the `tower_auth_secret` parameter is now deprecated and will be removed in a future release.
+=== Ansible-core 2.19
+
+This link:https://access.redhat.com/articles/7128367[technical preview] includes an overhaul of the templating system and a new feature labeled Data Tagging. These changes enable reporting of numerous problematic behaviors that went undetected in previous releases, with wide-ranging positive effects on security, performance, and user experience.
+
+Backward compatibility has been preserved where practical, but some breaking changes were necessary. This guide describes some common problem scenarios with example content, error messages, and suggested solutions.
+
+We recommend you test your playbooks and roles in a staging environment with this release to determine where you may need to make changes.
+
+For further information, see the link:https://ansible.readthedocs.io/projects/ansible-core/devel/porting_guides/porting_guide_core_2.19.html#id3[Ansible Porting Guide].
+
+
+
=== Availability of {AAPchatbot}
The {AAPchatbot} is now available on {PlatformNameShort} 2.5 on {OCP} as a Technology Preview release. It is an intuitive chat interface embedded within the {PlatformNameShort}, utilizing generative artificial intelligence (AI) to answer questions about the {PlatformNameShort}.
From 014e75e7077542d9123152b03d70afab3143936c Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Tue, 5 Aug 2025 10:21:52 +0100
Subject: [PATCH 27/71] Add redis cert considerations (#3982) (#3983)
Document update for containerised AAP 2.5 custom Redis Cluster certificate
https://issues.redhat.com/browse/AAP-48450
---
.../assembly-using-custom-tls-certificates.adoc | 3 +++
.../platform/con-redis-cert-considerations.adoc | 11 +++++++++++
2 files changed, 14 insertions(+)
create mode 100644 downstream/modules/platform/con-redis-cert-considerations.adoc
diff --git a/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc b/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
index 93369f5119..c243df1c72 100644
--- a/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
+++ b/downstream/assemblies/platform/assembly-using-custom-tls-certificates.adoc
@@ -35,5 +35,8 @@ include::platform/proc-provide-custom-ca-cert.adoc[leveloffset=+2]
// Receptor certificate considerations
include::platform/con-receptor-cert-considerations.adoc[leveloffset=+1]
+// Redis certificate considerations
+include::platform/con-redis-cert-considerations.adoc[leveloffset=+1]
+
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/modules/platform/con-redis-cert-considerations.adoc b/downstream/modules/platform/con-redis-cert-considerations.adoc
new file mode 100644
index 0000000000..b9c440d018
--- /dev/null
+++ b/downstream/modules/platform/con-redis-cert-considerations.adoc
@@ -0,0 +1,11 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="redis-certificate-considerations"]
+= Redis certificate considerations
+
+[role="_abstract"]
+When using custom TLS certificates for Redis-related services, consider the following for mutual TLS (mTLS) communication if specifying Extended Key Usage (EKU):
+
+* The Redis server certificate (`redis_tls_cert`) should include the `serverAuth` (web server authentication) and `clientAuth` (client authentication) EKU.
+
+* The Redis client certificates (`gateway_redis_tls_cert`, `eda_redis_tls_cert`) should include the `clientAuth` (client authentication) EKU.
From f1e4fe52e974768f20d28eecb821398ad65c18d9 Mon Sep 17 00:00:00 2001
From: Ian Fowler <77341519+ianf77@users.noreply.github.com>
Date: Tue, 5 Aug 2025 11:05:49 +0100
Subject: [PATCH 28/71] DITA migration changes(2.6) CAG Ch 13 (#3987)
Configuring automation execution UI and modular compliance chapter 13
https://issues.redhat.com/browse/AAP-46733
---
...assembly-controller-secret-management.adoc | 11 +++++++
...c-controller-configure-secret-lookups.adoc | 20 ++++++-------
.../ref-aws-secrets-manager-lookup.adoc | 2 +-
...-controller-metadata-credential-input.adoc | 29 +++++++------------
4 files changed, 32 insertions(+), 30 deletions(-)
diff --git a/downstream/assemblies/platform/assembly-controller-secret-management.adoc b/downstream/assemblies/platform/assembly-controller-secret-management.adoc
index afbc61d2a4..f520ad592d 100644
--- a/downstream/assemblies/platform/assembly-controller-secret-management.adoc
+++ b/downstream/assemblies/platform/assembly-controller-secret-management.adoc
@@ -29,14 +29,25 @@ These external secret values are fetched before running a playbook that needs th
For more information about specifying secret management system credentials in the user interface, see link:{URLControllerUserGuide}/index#controller-credentials[Managing user credentials].
include::platform/proc-controller-configure-secret-lookups.adoc[leveloffset=+1]
+
include::platform/ref-controller-metadata-credential-input.adoc[leveloffset=+2]
+
include::platform/ref-aws-secrets-manager-lookup.adoc[leveloffset=+2]
+
include::platform/ref-centrify-vault-lookup.adoc[leveloffset=+2]
+
include::platform/ref-cyberark-ccp-lookup.adoc[leveloffset=+2]
+
include::platform/ref-cyberark-conjur-lookup.adoc[leveloffset=+2]
+
include::platform/ref-hashicorp-vault-lookup.adoc[leveloffset=+2]
+
include::platform/ref-hashicorp-signed-ssh.adoc[leveloffset=+2]
+
include::platform/ref-azure-key-vault-lookup.adoc[leveloffset=+2]
+
include::platform/ref-thycotic-devops-vault.adoc[leveloffset=+2]
+
include::platform/ref-thycotic-secret-server.adoc[leveloffset=+2]
+
include::platform/proc-controller-github-app-token.adoc[leveloffset=+2]
diff --git a/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc b/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
index 87b479aa11..1cabbac124 100644
--- a/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
+++ b/downstream/modules/platform/proc-controller-configure-secret-lookups.adoc
@@ -15,16 +15,16 @@ Use the following procedure to use {ControllerName} to configure and use each of
.Procedure
. Create an external credential for authenticating with the secret management system. At minimum, give a name for the external credential and select one of the following for the *Credential type* field:
+
-* xref:ref-aws-secrets-manager-lookup[AWS Secrets Manager Lookup]
-* xref:ref-centrify-vault-lookup[Centrify Vault Credential Provider Lookup]
-* xref:ref-cyberark-ccp-lookup[CyberArk Central Credential Provider (CCP) Lookup]
-* xref:ref-cyberark-conjur-lookup[CyberArk Conjur Secrets Manager Lookup]
-* xref:ref-hashicorp-vault-lookup[HashiCorp Vault Secret Lookup]
-* xref:ref-hashicorp-signed-ssh[HashiCorp Vault Signed SSH]
-* xref:ref-azure-key-vault-lookup[{Azure} Key Vault]
-* xref:ref-thycotic-devops-vault[Thycotic DevOps Secrets Vault]
-* xref:ref-thycotic-secret-server[Thycotic Secret Server]
-* xref:controller-github-app-token[GitHub app token lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-aws-secrets-manager-lookup[AWS Secrets Manager Lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-centrify-vault-lookup[Centrify Vault Credential Provider Lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-cyberark-ccp-lookup[CyberArk Central Credential Provider (CCP) Lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-cyberark-conjur-lookup[CyberArk Conjur Secrets Manager Lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-hashicorp-vault-lookup[HashiCorp Vault Secret Lookup]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-hashicorp-signed-ssh[HashiCorp Vault Signed SSH]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-azure-key-vault-lookup[{Azure} Key Vault]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-thycotic-devops-vault[Thycotic DevOps Secrets Vault]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#ref-thycotic-secret-server[Thycotic Secret Server]
+* link:https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.6/html/configuring_automation_execution/assembly-controller-secret-management#controller-github-app-token[Configuring a GitHub App Installation Access Token Lookup]
+
In this example, the _Demo Credential_ is the target credential.
diff --git a/downstream/modules/platform/ref-aws-secrets-manager-lookup.adoc b/downstream/modules/platform/ref-aws-secrets-manager-lookup.adoc
index 906eccc013..92a5cf448b 100644
--- a/downstream/modules/platform/ref-aws-secrets-manager-lookup.adoc
+++ b/downstream/modules/platform/ref-aws-secrets-manager-lookup.adoc
@@ -7,7 +7,7 @@
This plugin enables {AWS} to be used as a credential input source to pull secrets from the {AWS} Secrets Manager.
The AWS Secrets Manager provides similar service to {Azure} Key Vault, and the AWS collection provides a lookup plugin for it.
-When AWS Secrets Manager lookup is selected for **Credential type**, give the following metadata to configure your lookup:
+When AWS Secrets Manager lookup is selected for *Credential type*, give the following metadata to configure your lookup:
* *AWS Access Key* (required): give the access key used for communicating with AWS key management system
* *AWS Secret Key* (required): give the secret as obtained by the AWS IAM console
diff --git a/downstream/modules/platform/ref-controller-metadata-credential-input.adoc b/downstream/modules/platform/ref-controller-metadata-credential-input.adoc
index 3ad1b88744..8a8595a4a8 100644
--- a/downstream/modules/platform/ref-controller-metadata-credential-input.adoc
+++ b/downstream/modules/platform/ref-controller-metadata-credential-input.adoc
@@ -6,8 +6,7 @@
The information required for the *Metadata* tab of the input source.
-[discrete]
-== AWS Secrets Manager Lookup
+*AWS Secrets Manager Lookup*
[cols="25%,50%",options="header"]
|===
@@ -16,9 +15,7 @@ The information required for the *Metadata* tab of the input source.
| AWS Secret Name (required) | Specify the AWS secret name that was generated by the AWS access key.
|===
-
-[discrete]
-== Centrify Vault Credential Provider Lookup
+*Centrify Vault Credential Provider Lookup*
[cols="25%,50%",options="header"]
|===
@@ -27,8 +24,7 @@ The information required for the *Metadata* tab of the input source.
| System Name | Specify the name used by the Centrify portal.
|===
-[discrete]
-== CyberArk Central Credential Provider Lookup
+*CyberArk Central Credential Provider Lookup*
[cols="25%,50%",options="header"]
|===
@@ -39,8 +35,7 @@ The information required for the *Metadata* tab of the input source.
| Reason | If required for the object's policy, supply a reason for checking out the secret, as CyberArk logs those.
|===
-[discrete]
-== CyberArk Conjur Secrets Lookup
+*CyberArk Conjur Secrets Lookup*
[cols="25%,50%",options="header"]
|===
@@ -49,8 +44,7 @@ The information required for the *Metadata* tab of the input source.
| Secret Version | Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
|===
-[discrete]
-== HashiVault Secret Lookup
+*HashiVault Secret Lookup*
[cols="25%,50%",options="header"]
|===
@@ -62,8 +56,7 @@ Leave it blank to use the first path segment of the *Path to Secret* field inste
| Secret Version (V2 Only) | Specify a version if necessary, otherwise, leave it empty to use the latest version.
|===
-[discrete]
-== HashiCorp Signed SSH
+*HashiCorp Signed SSH*
[cols="25%,50%",options="header"]
|===
@@ -78,8 +71,7 @@ So you could have a role that is permitted to get a certificate signed for root,
Hashi vault has a default user for whom it signs, for example, ec2-user.
|===
-[discrete]
-== {Azure} KMS
+*{Azure} KMS*
[cols="25%,50%",options="header"]
|===
@@ -88,8 +80,7 @@ Hashi vault has a default user for whom it signs, for example, ec2-user.
| Secret Version | Specify a version of the secret, if necessary, otherwise, leave it empty to use the latest version.
|===
-[discrete]
-== Thycotic DevOps Secrets Vault
+*Thycotic DevOps Secrets Vault*
[cols="25%,50%",options="header"]
|===
@@ -97,8 +88,8 @@ Hashi vault has a default user for whom it signs, for example, ec2-user.
| Secret Path (required) |Specify the path to where the secret information is stored, for example, /path/username.
|===
-[discrete]
-== Thycotic Secret Server
+*Thycotic Secret Server*
+
[cols="25%,50%",options="header"]
|===
| Metadata | Description
From c1b3b623293c4f2446234e3d934ba4556ca3b05c Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Tue, 5 Aug 2025 11:13:17 +0100
Subject: [PATCH 29/71] Add container details table (#3986) (#3988)
Adds a Container details table to Containerized installation guide
Containerized installation - update "Diagnosing the problem" with a mapping of container names and their usage
https://issues.redhat.com/browse/AAP-42439
---
...containerized-troubleshoot-diagnosing.adoc | 109 +++++++++++++-----
1 file changed, 82 insertions(+), 27 deletions(-)
diff --git a/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
index b871ef0ef4..e4884292c3 100644
--- a/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
+++ b/downstream/modules/platform/ref-containerized-troubleshoot-diagnosing.adoc
@@ -14,36 +14,91 @@ To get a list of the running container names run the following command:
$ podman ps --all --format "{{.Names}}"
----
-Example output:
-
-----
-postgresql
-redis-unix
-redis-tcp
-receptor
-automation-controller-rsyslog
-automation-controller-task
-automation-controller-web
-automation-eda-api
-automation-eda-daphne
-automation-eda-web
-automation-eda-worker-1
-automation-eda-worker-2
-automation-eda-activation-worker-1
-automation-eda-activation-worker-2
-automation-eda-scheduler
-automation-gateway-proxy
-automation-gateway
-automation-hub-api
-automation-hub-content
-automation-hub-web
-automation-hub-worker-1
-automation-hub-worker-2
-----
+.Container details
+[options="header" cols="1,1,2"]
+|===
+|Component group |Container name |Purpose
+
+|{ControllerNameStart}
+|`automation-controller-rsyslog`
+|Handles centralized logging for {ControllerName}.
+
+|{ControllerNameStart}
+|`automation-controller-task`
+|Manages and runs tasks related to {ControllerName}, such as running playbooks and interacting with inventories.
+
+|{ControllerNameStart}
+|`automation-controller-web`
+|A web server that provides a REST API for {ControllerName}. This is accessed and routed through {Gateway} for user interaction.
+
+|{EDAName}
+|`automation-eda-api`
+|Exposes the API for {EDAName}, allowing external systems to trigger and manage event-driven automations.
+
+|{EDAName}
+|`automation-eda-daphne`
+|A web server for {EDAName}, handling WebSocket connections and serving static files.
+
+|{EDAName}
+|`automation-eda-web`
+|A web server that provides a REST API for {EDAName}. This is accessed and routed through {Gateway} for user interaction.
+
+|{EDAName}
+|`automation-eda-worker-`
+|These containers run the automation rules and playbooks based on incoming events.
+
+|{EDAName}
+|`automation-eda-activation-worker-`
+|These containers manage the activation of automation rules, ensuring they run when specific conditions are met.
+
+|{EDAName}
+|`automation-eda-scheduler`
+|Responsible for scheduling and managing recurring tasks and rule activations.
+
+|{GatewayStart}
+|`automation-gateway-proxy`
+|Acts as a reverse proxy, routing incoming requests to the appropriate {PlatformNameShort} services.
+
+|{GatewayStart}
+|`automation-gateway`
+|Responsible for authentication, authorization, and overall request handling for the platform, all of which is exposed through a REST API and served by a web server.
+
+|{HubNameStart}
+|`automation-hub-api`
+|Provides the API for {HubName}, enabling interaction with collection content, user management, and other {HubName} functionality.
+
+|{HubNameStart}
+|`automation-hub-content`
+|Manages and serves Ansible Content Collections, roles, and modules stored in {HubName}.
+
+|{HubNameStart}
+|`automation-hub-web`
+|A web server that provides a REST API for {HubName}. This is accessed and routed through {Gateway} for user interaction.
+
+|{HubNameStart}
+|`automation-hub-worker-`
+|These containers handle background tasks for {HubName}, such as content synchronization, indexing, and validation.
+
+|Performance Co-Pilot
+|`pcp`
+|If Performance Co-Pilot Monitoring is enabled, this container is used for system performance monitoring and data collection.
+
+|PostgreSQL
+|`postgresql`
+|Hosts the PostgreSQL database for {PlatformNameShort}.
+
+|Receptor
+|`receptor`
+|Facilitates secure and reliable communication within {PlatformNameShort}.
+
+|Redis
+|`redis-`
+|Responsible for caching, real-time analytics and fast data retrieval.
+|===
*Inspecting the logs*
-To inspect any running container logs, run the `journalctl` command:
+Containerized {PlatformNameShort} uses `journald` for Podman logging. To inspect any running container logs, run the `journalctl` command:
----
$ journalctl CONTAINER_NAME=
From c64bef1b62fe3954395c3ca1af2397777c3c6ea1 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Tue, 5 Aug 2025 12:07:42 +0100
Subject: [PATCH 30/71] Update nginx_tls_protocols default value (#3990)
(#3991)
RPM installer - update nginx_tls_protocols default value in documentation
https://issues.redhat.com/browse/AAP-48089
---
downstream/modules/platform/ref-controller-variables.adoc | 2 +-
downstream/modules/platform/ref-eda-controller-variables.adoc | 2 +-
downstream/modules/platform/ref-gateway-variables.adoc | 2 +-
downstream/modules/platform/ref-hub-variables.adoc | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/downstream/modules/platform/ref-controller-variables.adoc b/downstream/modules/platform/ref-controller-variables.adoc
index 321439bfea..186d5a0be0 100644
--- a/downstream/modules/platform/ref-controller-variables.adoc
+++ b/downstream/modules/platform/ref-controller-variables.adoc
@@ -115,7 +115,7 @@ Set this variable to `true` to disable HTTPS.
| `controller_nginx_https_protocols`
| Protocols that {ControllerName} supports when handling HTTPS traffic.
| Optional
-| RPM = `[TLSv1.2]`. Container = `[TLSv1.2, TLSv1.3]`
+| `[TLSv1.2, TLSv1.3]`
| `nginx_user_headers`
| `controller_nginx_user_headers`
diff --git a/downstream/modules/platform/ref-eda-controller-variables.adoc b/downstream/modules/platform/ref-eda-controller-variables.adoc
index dd30231982..183d345a63 100644
--- a/downstream/modules/platform/ref-eda-controller-variables.adoc
+++ b/downstream/modules/platform/ref-eda-controller-variables.adoc
@@ -305,7 +305,7 @@ eda_extra_settings:
| `eda_nginx_https_protocols`
| Protocols that {EDAName} supports when handling HTTPS traffic.
| Optional
-| RPM = `[TLSv1.2]`. Container = `[TLSv1.2, TLSv1.3]`.
+| `[TLSv1.2, TLSv1.3]`
|
| `eda_pg_socket`
diff --git a/downstream/modules/platform/ref-gateway-variables.adoc b/downstream/modules/platform/ref-gateway-variables.adoc
index 85e73db7b4..dfaa0f1d45 100644
--- a/downstream/modules/platform/ref-gateway-variables.adoc
+++ b/downstream/modules/platform/ref-gateway-variables.adoc
@@ -267,7 +267,7 @@ Inventory file variables for {Gateway}.
| `gateway_nginx_https_protocols`
| Protocols that {Gateway} will support when handling HTTPS traffic.
| Optional
-| RPM = `[TLSv1.2]`. Container = `[TLSv1.2, TLSv1.3]`.
+| `[TLSv1.2, TLSv1.3]`
| `redis_disable_tls`
| `gateway_redis_disable_tls`
diff --git a/downstream/modules/platform/ref-hub-variables.adoc b/downstream/modules/platform/ref-hub-variables.adoc
index 455b936ee9..427bab5245 100644
--- a/downstream/modules/platform/ref-hub-variables.adoc
+++ b/downstream/modules/platform/ref-hub-variables.adoc
@@ -373,7 +373,7 @@ For more information about the list of parameters, see link:https://django-stora
| `hub_nginx_https_protocols`
| Protocols that {HubName} will support when handling HTTPS traffic.
| Optional
-| RPM = `[TLSv1.2]`. Container = `[TLSv1.2, TLSv1.3]`.
+| `[TLSv1.2, TLSv1.3]`
|
| `hub_pg_socket`
From 2a1f006fa212d3d2ec40ee856b45812645dc25f3 Mon Sep 17 00:00:00 2001
From: Jameria Self <73364088+jself-sudoku@users.noreply.github.com>
Date: Tue, 5 Aug 2025 08:43:53 -0400
Subject: [PATCH 31/71] AAP-46217 Pre-migration compliance for Using automation
decisions, chapter 4 (#3980) (#3995)
* AAP-46217 Migration prep on Chapt 5 and one module from Chapt 2
* AAP-46217 Migration prep on Chapt 4 and one module from Chapt 2
---
downstream/assemblies/eda/assembly-eda-projects.adoc | 8 +++++---
downstream/modules/eda/con-credentials-list-view.adoc | 4 ++--
downstream/modules/eda/con-eda-projects-list-view.adoc | 1 +
downstream/modules/eda/proc-eda-delete-project.adoc | 1 +
downstream/modules/eda/proc-eda-editing-a-project.adoc | 1 +
5 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/downstream/assemblies/eda/assembly-eda-projects.adoc b/downstream/assemblies/eda/assembly-eda-projects.adoc
index a258124017..489e840299 100644
--- a/downstream/assemblies/eda/assembly-eda-projects.adoc
+++ b/downstream/assemblies/eda/assembly-eda-projects.adoc
@@ -1,10 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
[id="eda-projects"]
= Projects
-Projects are a logical collection of rulebooks.
-They must be a git repository and only http protocol is supported.
-The rulebooks of a project must be located in the path defined for {EDAName} content in Ansible collections: `/extensions/eda/rulebooks` at the root of the project.
+Projects are a logical collection of rulebooks. They must be a git repository and only http protocol is supported. The rulebooks of a project must be located in the path defined for {EDAName} content in Ansible collections: `/extensions/eda/rulebooks` at the root of the project.
[IMPORTANT]
====
@@ -12,6 +11,9 @@ To meet high availability demands, {EDAcontroller} shares centralized link:https
====
include::eda/proc-eda-set-up-new-project.adoc[leveloffset=+1]
+
include::eda/con-eda-projects-list-view.adoc[leveloffset=+1]
+
include::eda/proc-eda-editing-a-project.adoc[leveloffset=+1]
+
include::eda/proc-eda-delete-project.adoc[leveloffset=+1]
diff --git a/downstream/modules/eda/con-credentials-list-view.adoc b/downstream/modules/eda/con-credentials-list-view.adoc
index e13277096d..707b2fea8b 100644
--- a/downstream/modules/eda/con-credentials-list-view.adoc
+++ b/downstream/modules/eda/con-credentials-list-view.adoc
@@ -9,9 +9,9 @@ From the menu bar, you can search for credentials in the *Name* search field.
You also have the following options in the menu bar:
-* Choose how fields are shown in the list view by clicking the btn:[Manage columns] icon. You have four options in which you can arrange your fields:
+* *Manage columns* - You can choose how fields are shown in the list view by clicking this option. You have four ways you can arrange your fields:
** *Column* - Shows the column in the table.
** *Description* - Shows the column when the item is expanded as a full width description.
** *Expanded* - Shows the column when the item is expanded as a detail.
** *Hidden* - Hides the column.
-* Choose between a btn:[List view] or a btn:[Card view], by clicking the icons.
+* *List view* or *Card view* - You can choose between these views by clicking the applicable icons.
diff --git a/downstream/modules/eda/con-eda-projects-list-view.adoc b/downstream/modules/eda/con-eda-projects-list-view.adoc
index bb6c6ac36b..46e6b7cb9b 100644
--- a/downstream/modules/eda/con-eda-projects-list-view.adoc
+++ b/downstream/modules/eda/con-eda-projects-list-view.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-projects-list-view"]
= Projects list view
diff --git a/downstream/modules/eda/proc-eda-delete-project.adoc b/downstream/modules/eda/proc-eda-delete-project.adoc
index ef0a7ba5d5..a000bb5e1f 100644
--- a/downstream/modules/eda/proc-eda-delete-project.adoc
+++ b/downstream/modules/eda/proc-eda-delete-project.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-delete-project"]
= Deleting a project
diff --git a/downstream/modules/eda/proc-eda-editing-a-project.adoc b/downstream/modules/eda/proc-eda-editing-a-project.adoc
index 79edf34ba2..9193bd0f3a 100644
--- a/downstream/modules/eda/proc-eda-editing-a-project.adoc
+++ b/downstream/modules/eda/proc-eda-editing-a-project.adoc
@@ -1,3 +1,4 @@
+:_mod-docs-content-type: PROCEDURE
[id="eda-editing-a-project"]
= Editing a project
From d19b355f7cdc07b456db3a3e2ddcd6f930640499 Mon Sep 17 00:00:00 2001
From: Hala
Date: Tue, 5 Aug 2025 15:27:36 -0500
Subject: [PATCH 32/71] AAP-45594: Removes references to component tabs in RBAC
docs (#3972) (#3997)
* updates to roles assembly for 2.6 release
* updates for orgs and teams
* 2.6 updates for RBAC users
* updates add user step
---
.../proc-controller-add-organization-user.adoc | 10 +++++-----
.../modules/platform/proc-gw-create-roles.adoc | 6 +++---
.../modules/platform/proc-gw-delete-roles.adoc | 4 ++--
.../modules/platform/proc-gw-edit-roles.adoc | 4 ++--
.../platform/proc-gw-editing-a-user.adoc | 18 +++++++++---------
downstream/modules/platform/proc-gw-roles.adoc | 6 +++---
.../platform/proc-gw-team-add-user.adoc | 10 +++++-----
7 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/downstream/modules/platform/proc-controller-add-organization-user.adoc b/downstream/modules/platform/proc-controller-add-organization-user.adoc
index 8724776a51..c589d4d08a 100644
--- a/downstream/modules/platform/proc-controller-add-organization-user.adoc
+++ b/downstream/modules/platform/proc-controller-add-organization-user.adoc
@@ -7,19 +7,19 @@
You can provide a user with access to an organization by adding them to the organization and managing the roles associated with the user. To add a user to an organization, the user must already exist. For more information, see xref:proc-controller-creating-a-user[Creating a user].
To add roles for a user, the role must already exist. See xref:proc-gw-create-roles[Creating a role] for more information.
-The following tab selections are available when adding users to an organization. When user accounts from the {ControllerName} organization have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* tab shows content based on whether the users were added to the organization prior to migration.
+// [[hherbly] removed for 2.6] The following tab selections are available when adding users to an organization. When user accounts from the {ControllerName} organization have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* tab shows content based on whether the users were added to the organization prior to migration.
-{PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally provide specific organization level roles.
+// {PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally provide specific organization level roles.
-Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships but not you can not add new memberships.
+// Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships, but you cannot add new memberships.
New user memberships to an organization must be added at the platform level.
.Procedure
. From the navigation panel, select {MenuAMOrganizations}.
. From the *Organizations* list view, select the organization to which you want to add a user.
-. Click the *Users* tab to add users.
-. Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the team, or select the *Automation Execution* tab to view or remove user access from the team.
+. Click the *Users* tab, then btn:[Assign Users] to add users.
+// . Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the team, or select the *Automation Execution* tab to view or remove user access from the team.
. Select one or more users from the list by clicking the checkbox next to the name to add them as members.
. Click btn:[Next].
. Select the roles you want the selected user to have. Scroll down for a complete list of roles.
diff --git a/downstream/modules/platform/proc-gw-create-roles.adoc b/downstream/modules/platform/proc-gw-create-roles.adoc
index 4717952dd3..021f2c6245 100644
--- a/downstream/modules/platform/proc-gw-create-roles.adoc
+++ b/downstream/modules/platform/proc-gw-create-roles.adoc
@@ -9,12 +9,12 @@
.Procedure
. From the navigation panel, select {MenuAMRoles}.
-. Select a tab for the component resource for which you want to create custom roles.
+// [[hherbly]This may need to be replaced with updated steps for 2.6.]. Select a tab for the component resource for which you want to create custom roles.
+
-include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+// include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+
. Click btn:[Create role].
. Provide a *Name* and optionally include a *Description* for the role.
. Select a *Content Type*.
-. Select the *Permissions* you want assigned to this role.
+. Select the *Permissions* you want assigned to this role from the drop-down menu.
. Click btn:[Create role] to create your new role.
diff --git a/downstream/modules/platform/proc-gw-delete-roles.adoc b/downstream/modules/platform/proc-gw-delete-roles.adoc
index e67cde04ee..f4df6ffb78 100644
--- a/downstream/modules/platform/proc-gw-delete-roles.adoc
+++ b/downstream/modules/platform/proc-gw-delete-roles.adoc
@@ -9,9 +9,9 @@ Built in roles can not be deleted, however, you can delete custom roles from the
.Procedure
. From the navigation panel, select {MenuAMRoles}.
-. Select a tab for the component resource for which you want to create custom roles.
+// [[hherbly]This may need to be replaced with updated steps for 2.6.]. Select a tab for the component resource for which you want to create custom roles.
+
-include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+// include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+
. Click the *More Actions* icon *{MoreActionsIcon}* next to the role you want and select *Delete role*.
. To delete roles in bulk, select the roles you want to delete from the *Roles* list view, click the *More Actions* icon *{MoreActionsIcon}*, and select *Delete roles*.
diff --git a/downstream/modules/platform/proc-gw-edit-roles.adoc b/downstream/modules/platform/proc-gw-edit-roles.adoc
index 192a43733d..6b07202acc 100644
--- a/downstream/modules/platform/proc-gw-edit-roles.adoc
+++ b/downstream/modules/platform/proc-gw-edit-roles.adoc
@@ -9,9 +9,9 @@ Built in roles can not be changed, however, you can modify custom roles from the
.Procedure
. From the navigation panel, select {MenuAMRoles}.
-. Select a tab for the component resource for which you want to modify a custom role.
+// [[hherbly]This may need to be replaced with updated steps for 2.6.]. Select a tab for the component resource for which you want to modify a custom role.
+
-include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+// include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+
. Click the *Edit role* icon image:leftpencil.png[Edit,15,15] next to the role you want and modify the role settings as needed.
. Click btn:[Save role] to save your changes.
diff --git a/downstream/modules/platform/proc-gw-editing-a-user.adoc b/downstream/modules/platform/proc-gw-editing-a-user.adoc
index 527e591176..4f017fd8a0 100644
--- a/downstream/modules/platform/proc-gw-editing-a-user.adoc
+++ b/downstream/modules/platform/proc-gw-editing-a-user.adoc
@@ -6,13 +6,13 @@
You can modify the properties of a user account after it is created.
-In upgrade scenarios, there might be pre-existing user accounts from {ControllerName} or {HubName} services. When editing these user accounts, the *User type* checkboxes indicate whether the account had one of the following service level administrator privileges:
+// [[hherbly] removed for 2.6] In upgrade scenarios, there might be pre-existing user accounts from {ControllerName} or {HubName} services. When editing these user accounts, the *User type* checkboxes indicate whether the account had one of the following service level administrator privileges:
-Automation Execution Administrator:: A previously defined {ControllerName} administrator with full read and write privileges over automation execution resources only.
-Automation Decisions Administrator:: A previously defined {EDAName} administrator with full read and write privileges over automation decision resources only.
-Automation Content Administrator:: A previously defined {HubName} administrator with full read and write privileges over automation content resources only.
+// Automation Execution Administrator:: A previously defined {ControllerName} administrator with full read and write privileges over automation execution resources only.
+// Automation Decisions Administrator:: A previously defined {EDAName} administrator with full read and write privileges over automation decision resources only.
+// Automation Content Administrator:: A previously defined {HubName} administrator with full read and write privileges over automation content resources only.
-Platform administrators can revoke or assign administrator permissions for the individual services and designate the user as either an *{PlatformNameShort} Administrator*, *{PlatformNameShort} Auditor* or normal user. Assigning administrator privileges to all of the individual services automatically designates the user as an *{PlatformNameShort} Administrator*. See xref:proc-controller-creating-a-user[Creating a user] for more information about user types.
+// Platform administrators can revoke or assign administrator permissions for the individual services and designate the user as either an *{PlatformNameShort} Administrator*, *{PlatformNameShort} Auditor* or normal user. Assigning administrator privileges to all of the individual services automatically designates the user as an *{PlatformNameShort} Administrator*. See xref:proc-controller-creating-a-user[Creating a user] for more information about user types.
To see whether a user had service level auditor privileges, you must refer to the API.
@@ -31,9 +31,9 @@ Users previously designated as {ControllerName} or {HubName} administrators are
. The *Edit* user page is displayed where you can modify user details such as, *Password*, *Email*, *User type*, and *Organization*.
+
-[NOTE]
-====
-If the user account was migrated to {PlatformNameShort} 2.5 during the upgrade process and had administrator privileges for an individual service, additional User type checkboxes will be available. You can use these checkboxes to revoke or add individual privileges or designate the user as a platform administrator, system auditor or normal user.
-====
+// [NOTE]
+// ====
+// If the user account was migrated to {PlatformNameShort} 2.5 during the upgrade process and had administrator privileges for an individual service, additional User type checkboxes will be available. You can use these checkboxes to revoke or add individual privileges or designate the user as a platform administrator, system auditor or normal user.
+// ====
+
. After your changes are complete, click *Save user*.
\ No newline at end of file
diff --git a/downstream/modules/platform/proc-gw-roles.adoc b/downstream/modules/platform/proc-gw-roles.adoc
index 8c2314b6d5..fadc8392f8 100644
--- a/downstream/modules/platform/proc-gw-roles.adoc
+++ b/downstream/modules/platform/proc-gw-roles.adoc
@@ -9,9 +9,9 @@ You can display the roles assigned for component resources from the menu:Access
.Procedure
. From the navigation panel, select {MenuAMRoles}.
-. Select a tab for the component resource for which you want to create custom roles.
+// [[hherbly]This may need to be replaced with updated steps for 2.6.] Select a tab for the component resource for which you want to create custom roles.
+
-include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+// include::snippets/snip-gw-roles-note-multiple-components.adoc[]
+
-. From the table header, you can sort the list of roles by using the arrows for *Name*, *Description*, *Created* and *Editable* or by making sort selections in the *Sort* list.
+. From the table header, you can sort the list of roles by using the arrows for *Name*, *Description*, *Component*, *Resource Type*, and *Role Creation*, or by making sort selections in the *Sort* list.
. You can filter the list of roles by selecting *Name* or *Editable* from the filter list and clicking the arrow.
diff --git a/downstream/modules/platform/proc-gw-team-add-user.adoc b/downstream/modules/platform/proc-gw-team-add-user.adoc
index 8918eb6eee..f208cde009 100644
--- a/downstream/modules/platform/proc-gw-team-add-user.adoc
+++ b/downstream/modules/platform/proc-gw-team-add-user.adoc
@@ -5,13 +5,13 @@
= Adding users to a team
To add a user to a team, the user must already have been created. For more information, see xref:proc-controller-creating-a-user[Creating a user]. Adding a user to a team adds them as a member only. Use the *Roles* tab to assign a role for different resources to the selected team.
-The following tab selections are available when adding users to a team. When user accounts from {ControllerName} or {HubName} organizations have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* and *Automation Content* tabs show content based on whether the users were added to those organizations prior to migration.
+// [[hherbly]This may need to be replaced with updated steps for 2.6.] The following tab selections are available when adding users to a team. When user accounts from {ControllerName} or {HubName} organizations have been migrated to {PlatformNameShort} 2.5 during the upgrade process, the *Automation Execution* and *Automation Content* tabs show content based on whether the users were added to those organizations prior to migration.
-{PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally provide specific organization level roles.
+// {PlatformNameShort}:: Reflects all users added to the organization at the platform level. From this tab, you can add users as organization members and, optionally provide specific organization level roles.
-Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships but you can not add new memberships. New organization memberships must be added through the platform.
+// Automation Execution:: Reflects users that were added directly to the {ControllerName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {ControllerName} and remove those memberships but you can not add new memberships. New organization memberships must be added through the platform.
-Automation Content:: Reflects users that were added directly to the {HubName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {HubName} and remove those memberships but you can not add new memberships.
+// Automation Content:: Reflects users that were added directly to the {HubName} organization prior to an upgrade and migration. From this tab, you can only view existing memberships in {HubName} and remove those memberships but you can not add new memberships.
New user memberships to a team must be added at the platform level.
@@ -21,7 +21,7 @@ New user memberships to a team must be added at the platform level.
. From the navigation panel, select {MenuAMTeams}.
. Select the team to which you want to add users.
. Select the *Users* tab.
-. Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the team, or select the *Automation Execution* or *Automation Content* tab to view or remove user access from the team.
+// . Select the *{PlatformNameShort}* tab and click btn:[Add users] to add user access to the team, or select the *Automation Execution* or *Automation Content* tab to view or remove user access from the team.
. Select one or more users from the list by clicking the checkbox next to the name to add them as members of this team.
. Click btn:[Add users].
\ No newline at end of file
From a362cd5ee8133fb2ef5a74b198f680ae92b9e41a Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Wed, 6 Aug 2025 11:00:15 +0100
Subject: [PATCH 33/71] AAP-50259 PVC edits (#3998) (#4002)
* AAP-50259 PVC edits
* PR suggestions
---
.../platform/assembly-aap-recovery.adoc | 2 +
.../proc-aap-platform-ext-DB-restore.adoc | 65 +++++++++++++++
.../proc-aap-platform-gateway-restore.adoc | 13 ++-
.../proc-aap-platform-pvc-restore.adoc | 80 +++++++++++++++++++
4 files changed, 158 insertions(+), 2 deletions(-)
create mode 100644 downstream/modules/platform/proc-aap-platform-ext-DB-restore.adoc
create mode 100644 downstream/modules/platform/proc-aap-platform-pvc-restore.adoc
diff --git a/downstream/assemblies/platform/assembly-aap-recovery.adoc b/downstream/assemblies/platform/assembly-aap-recovery.adoc
index 7eb4af62e0..74648579e4 100644
--- a/downstream/assemblies/platform/assembly-aap-recovery.adoc
+++ b/downstream/assemblies/platform/assembly-aap-recovery.adoc
@@ -12,6 +12,8 @@ ifdef::context[:parent-context: {context}]
If you lose information on your system or experience issues with an upgrade, you can use the backup resources of your deployment instances. Use the following procedures to recover your {PlatformNameShort} deployment files.
include::platform/proc-aap-platform-gateway-restore.adoc[leveloffset=+1]
+include::platform/proc-aap-platform-pvc-restore.adoc[leveloffset=+1]
+include::platform/proc-aap-platform-ext-DB-restore.adoc[leveloffset=+1]
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/modules/platform/proc-aap-platform-ext-DB-restore.adoc b/downstream/modules/platform/proc-aap-platform-ext-DB-restore.adoc
new file mode 100644
index 0000000000..a2941ae02b
--- /dev/null
+++ b/downstream/modules/platform/proc-aap-platform-ext-DB-restore.adoc
@@ -0,0 +1,65 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="aap-platform-ext-DB-restore_{context}"]
+
+= Recovering your {PlatformNameShort} deployment from an external database
+
+You can restore an external database on {OCP} using the Operator. Use the following procedure to restore from an external database.
+
+[IMPORTANT]
+====
+Restoring from an external database force drops the database, which overrides your existing external database.
+====
+
+.Prerequisites
+
+* You have an external database.
+* You have installed the {OperatorPlatformNameShort} on {OCPShort}.
+
+.Procedure
+
+. Log in to {OCP}.
+. Navigate to menu:Operators[Installed Operators].
+. Select your {OperatorPlatformNameShort} deployment.
+. Go to your *All Instances* tab, and click btn:[Create New].
+. Select *{PlatformNameShort} Restore* from the list.
+. For *Name* enter the name for the recovery deployment.
+. For *New {PlatformNameShort} Name* enter the new name for your {PlatformNameShort} instance.
+* If you are restoring to an {PlatformNameShort} instance with the same name, you must add `force_drop_db: true` to drop the database on restore.
+. *Backup Source* defaults to *CR*.
+. For *Backup name* enter the name you chose when creating the backup.
+. Under *YAML view* paste in the following example:
++
+----
+---
+apiVersion: aap.ansible.com/v1alpha1
+kind: AnsibleAutomationPlatformRestore
+metadata:
+ name: aaprestore
+spec:
+ deployment_name: aap
+ backup_name: aapbackup
+ controller:
+ force_drop_db: true
+----
++
+. Click btn:[Create].
+
+.Verification
+
+Your backup starts restoring under the *AnsibleAutomationPlatformRestores* tab.
+
+[NOTE]
+====
+The recovery is not complete until all the resources are successfully restored. Depending on the size of your database this can take some time.
+====
+
+To verify that your recovery was successful you can:
+
+. Go to menu:Workloads[Pods].
+. Confirm that all pods are in a *Running* or *Completed* state.
+
+[role="_additional-resources"]
+.Additional resources
+
+* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index[Installing on OpenShift Container Platform]
diff --git a/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc b/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc
index 1f1eb1ad9f..97e89f4736 100644
--- a/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc
+++ b/downstream/modules/platform/proc-aap-platform-gateway-restore.adoc
@@ -3,6 +3,7 @@
[id="aap-platform-gateway-restore_{context}"]
= Recovering your {PlatformNameShort} deployment
+
*{PlatformNameShort}* manages any enabled components (such as, {ControllerName}, {HubName}, and {EDAName}), when you recover *{PlatformNameShort}* you also restore these components.
In previous versions of the {OperatorPlatformNameShort}, it was necessary to create a restore object for each component of the platform.
@@ -13,11 +14,13 @@ Now, you create a single *AnsibleAutomationPlatformRestore* resource, which cre
* EDARestore
.Prerequisites
+
* You must be authenticated with an OpenShift cluster.
* You have installed the {OperatorPlatformNameShort} on the cluster.
* The *AnsibleAutomationPlatformBackups* deployment is available in your cluster.
.Procedure
+
. Log in to {OCP}.
. Navigate to menu:Operators[Installed Operators].
. Select your {OperatorPlatformNameShort} deployment.
@@ -26,9 +29,11 @@ Now, you create a single *AnsibleAutomationPlatformRestore* resource, which cre
. For *Name* enter the name for the recovery deployment.
. For *New {PlatformNameShort} Name* enter the new name for your {PlatformNameShort} instance.
. *Backup Source* defaults to *CR*.
-. For *Backup name* enter the name your chose when creating the backup.
+. For *Backup name* enter the name you chose when creating the backup.
. Click btn:[Create].
+.Verification
+
Your backups starts restoring under the *AnsibleAutomationPlatformRestores* tab.
[NOTE]
@@ -36,8 +41,12 @@ Your backups starts restoring under the *AnsibleAutomationPlatformRestores* tab.
The recovery is not complete until all the resources are successfully restored. Depending on the size of your database this this can take some time.
====
-.Verification
To verify that your recovery was successful you can:
. Go to menu:Workloads[Pods].
. Confirm that all pods are in a *Running* or *Completed* state.
+
+[role="_additional-resources"]
+.Additional resources
+
+* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index[Installing on OpenShift Container Platform]
diff --git a/downstream/modules/platform/proc-aap-platform-pvc-restore.adoc b/downstream/modules/platform/proc-aap-platform-pvc-restore.adoc
new file mode 100644
index 0000000000..8b120ef949
--- /dev/null
+++ b/downstream/modules/platform/proc-aap-platform-pvc-restore.adoc
@@ -0,0 +1,80 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="aap-platform-pvc-restore_{context}"]
+
+= Recovering your {PlatformNameShort} deployment from a PVC
+
+A persistent volume claim (PVC) is a storage volume that stores data for {HubName} and {ControllerName} applications.
+These PVCs are independent of the applications and persist even if an application is deleted.
+You can restore data from a PVC as an alternative to recovering from an *{PlatformNameShort}* backup.
+
+For more information see the _Finding and deleting PVCs_ section of the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index[Installing on OpenShift Container Platform] guide.
+
+
+.Prerequisites
+
+* You have an existing PVC containing a backup.
+* You have installed the {OperatorPlatformNameShort} on {OCP}.
+
+.Procedure
+
+. Log in to {OCP}.
+. Navigate to menu:Operators[Installed Operators].
+. Select your {OperatorPlatformNameShort} deployment.
+. Go to your *All Instances* tab, and click btn:[Create New].
+. Select *{PlatformNameShort} Restore* from the list.
+. For *Name* enter the name for the recovery deployment.
+. For *New {PlatformNameShort} Name* enter the new name for your {PlatformNameShort} instance.
+. For *Backup Source* select *PVC*.
+.. *Backup PVC:* Enter the name of your PVC.
+.. *Backup Directory:* Enter the path to your backup directory on your PVC.
+. For *Backup name* enter the name you chose when creating the backup.
+. Under *YAML view* paste in the following example:
++
+----
+---
+apiVersion: aap.ansible.com/v1alpha1
+kind: AnsibleAutomationPlatformRestore
+metadata:
+ name: aap
+spec:
+ deployment_name: aap
+ backup_source: PVC
+ backup_pvc: aap-backup-claim
+ backup_dir: '/backups/aap-openshift-backup-2025-06-23-18:28:29'
+
+ controller:
+ backup_source: PVC
+ backup_pvc: aap-controller-backup-claim
+ backup_dir: '/backups/tower-openshift-backup-2025-06-23-182910'
+
+ hub:
+ backup_source: PVC
+ backup_pvc: aap-hub-backup-claim
+ backup_dir: '/backups/openshift-backup-2025-06-23-182853'
+ storage_type: file
+
+ eda:
+ backup_source: PVC
+ backup_pvc: aap-eda-backup-claim
+ backup_dir: '/backups/eda-openshift-backup-2025-06-23-18:29:11'
+----
++
+. Click btn:[Create].
+
+.Verification
+
+Your backups restore under the *AnsibleAutomationPlatformRestores* tab.
+
+[NOTE]
+====
+The recovery is not complete until all the resources are successfully restored. Depending on the size of your database this can take some time.
+====
+
+. Go to menu:Workloads[Pods].
+. Confirm that all pods are in a *Running* or *Completed* state.
+
+[role="_additional-resources"]
+.Additional resources
+
+* link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index[Installing on OpenShift Container Platform]
From 78a571d8a734f9b1959bfbb851b80f0b0ca548e7 Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Wed, 6 Aug 2025 11:11:44 +0100
Subject: [PATCH 34/71] DITA fixes for ocp backup and performancde guides
(#3999) (#4004)
---
.../platform/con-pod-specification-mods.adoc | 2 +-
.../proc-aap-platform-gateway-backup.adoc | 23 +++++++++++--------
.../platform/proc-customizing-pod-specs.adoc | 2 ++
.../platform/proc-enable-pods-ref-images.adoc | 6 +++--
.../proc-specify-nodes-job-execution.adoc | 2 +-
.../platform/proc-troubleshoot-same-name.adoc | 1 +
.../platform/ref-assign-pods-to-nodes.adoc | 2 +-
.../modules/platform/ref-resource-types.adoc | 6 ++---
.../platform/ref-set-custom-pod-timeout.adoc | 5 ++--
9 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/downstream/modules/platform/con-pod-specification-mods.adoc b/downstream/modules/platform/con-pod-specification-mods.adoc
index 9aa252c450..8fedbc8400 100644
--- a/downstream/modules/platform/con-pod-specification-mods.adoc
+++ b/downstream/modules/platform/con-pod-specification-mods.adoc
@@ -18,7 +18,7 @@ This custom document uses custom fields, such as `ImagePullSecrets`, that can be
A full list of options can be found in the link:https://docs.openshift.com/online/pro/architecture/core_concepts/pods_and_services.html[Openshift Online] documentation.
-.Example of a pod that provides a long-running service.
+*Example of a pod that provides a long-running service*
This example demonstrates many features of pods, most of which are discussed in other topics and thus only briefly mentioned here:
diff --git a/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc b/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc
index eae1edbe28..628ac720f2 100644
--- a/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc
+++ b/downstream/modules/platform/proc-aap-platform-gateway-backup.adoc
@@ -3,14 +3,23 @@
[id="aap-platform-gateway-backup_{context}"]
= Backing up your {PlatformNameShort} deployment
+
Regularly backing up your *{PlatformNameShort}* deployment is vital to protect against unexpected data loss and application errors. *{PlatformNameShort}* hosts any enabled components (such as, {ControllerName}, {HubName}, and {EDAName}), when you back up *{PlatformNameShort}* the operator will also back up these components.
+[NOTE]
+====
+{OperatorPlatformNameShort} creates a PersistentVolumeClaim (PVC) for your {PlatformNameShort} Backup automatically.
+You can use your own pre-created PVC by using the `backup_pvc` spec and specifying your PVC.
+====
+
.Prerequisites
+
* You must be authenticated on OpenShift cluster.
* You have installed {OperatorPlatformNameShort} on the cluster.
* You have deployed a *{PlatformNameShort}* instance using the {OperatorPlatformNameShort}.
.Procedure
+
. Log in to {OCP}.
. Navigate to menu:Operators[Installed Operators].
. Select your {OperatorPlatformNameShort} deployment.
@@ -24,10 +33,8 @@ When creating the *{PlatformNameShort} Backup* resource it also creates backup r
+
. In the *Name* field, enter a name for the backup.
. In the *Deployment name* field, enter the name of the deployed {PlatformNameShort} instance being backed up. For example if your {PlatformNameShort} deployment must be backed up and the deployment name is aap, enter 'aap' in the *Deployment name* field.
-. Click btn:[Create].
-
-This results in an *AnsibleAutomationPlatformBackup* resource. The the resource YAML is similar to the following:
-
+. Click btn:[Create]. This results in an *AnsibleAutomationPlatformBackup* resource similar to the following:
++
----
apiVersion: aap.ansible.com/v1alpha1
kind: AnsibleAutomationPlatformBackup
@@ -38,14 +45,10 @@ spec:
no_log: true
deployment_name: aap
----
-
-[NOTE]
-====
-{OperatorPlatformNameShort} creates a PersistentVolumeClaim (PVC) for your {PlatformNameShort} Backup automatically.
-You can use your own pre-created PVC by using the `backup_pvc` spec and specifying your PVC.
-====
++
.Verification
+
To verify that your backup was successful you can:
. Log in to {OCP}.
diff --git a/downstream/modules/platform/proc-customizing-pod-specs.adoc b/downstream/modules/platform/proc-customizing-pod-specs.adoc
index b964ce0af1..f09dc3562d 100644
--- a/downstream/modules/platform/proc-customizing-pod-specs.adoc
+++ b/downstream/modules/platform/proc-customizing-pod-specs.adoc
@@ -13,6 +13,8 @@ You can use the following procedure to customize the pod.
. Click btn:[Save].
. Optional: Click btn:[Expand] to view the entire customization window if you want to provide additional customizations.
+.Next steps
+
The image used at job launch time is determined by the {ExecEnvShort} associated with the job.
If a Container Registry credential is associated with the {ExecEnvShort}, then {ControllerName} uses `ImagePullSecret` to pull the image.
If you prefer not to give the service account permission to manage secrets, you must pre-create the `ImagePullSecret`, specify it on the pod specification, and omit any credential from the {ExecEnvShort} used.
diff --git a/downstream/modules/platform/proc-enable-pods-ref-images.adoc b/downstream/modules/platform/proc-enable-pods-ref-images.adoc
index 16e9a8df73..dcf332dd92 100644
--- a/downstream/modules/platform/proc-enable-pods-ref-images.adoc
+++ b/downstream/modules/platform/proc-enable-pods-ref-images.adoc
@@ -11,12 +11,12 @@ Alternatively, if the `ImagePullSecret` already exists in the container group na
Note that the image used by a job running in a container group is always overridden by the Execution Environment associated with the job.
-.Use of pre-created ImagePullSecrets (Advanced)
+*Use of pre-created ImagePullSecrets (Advanced)*
If you want to use this workflow and pre-create the `ImagePullSecret`, you can source the necessary information to create it from your local `.dockercfg` file on a system that has previously accessed a secure container registry.
+The `.dockercfg file`, or `$HOME/.docker/config.json` for newer Docker clients, is a Docker credentials file that stores your information if you have previously logged into a secured or insecure registry.
.Procedure
-The `.dockercfg file`, or `$HOME/.docker/config.json` for newer Docker clients, is a Docker credentials file that stores your information if you have previously logged into a secured or insecure registry.
. If you already have a `.dockercfg` file for the secured registry, you can create a secret from that file by running the following command:
+
@@ -66,6 +66,8 @@ $ oc secrets link builder
+
. Optional: For builds, you must also reference the secret as the pull secret from within your build configuration.
+.Verification
+
When the container group is successfully created, the *Details* tab of the newly created container group remains. This allows you to review and edit your container group information.
This is the same menu that is opened if you click the btn:[Edit] icon *✎* from the *Instance Group* link.
You can also edit instances and review jobs associated with this instance group.
diff --git a/downstream/modules/platform/proc-specify-nodes-job-execution.adoc b/downstream/modules/platform/proc-specify-nodes-job-execution.adoc
index 4c247bb4c9..aecff7fd0e 100644
--- a/downstream/modules/platform/proc-specify-nodes-job-execution.adoc
+++ b/downstream/modules/platform/proc-specify-nodes-job-execution.adoc
@@ -63,7 +63,7 @@ kubectl get nodes --show-labels
You can see that the `worker0` node now has a `disktype=ssd` label.
+
. In the {ControllerName} UI, specify that label in the metadata section of your customized pod specification in the container group.
-
++
[options="nowrap" subs="+quotes,attributes"]
----
apiVersion: v1
diff --git a/downstream/modules/platform/proc-troubleshoot-same-name.adoc b/downstream/modules/platform/proc-troubleshoot-same-name.adoc
index b5756e911f..2afdf8cf4b 100644
--- a/downstream/modules/platform/proc-troubleshoot-same-name.adoc
+++ b/downstream/modules/platform/proc-troubleshoot-same-name.adoc
@@ -10,6 +10,7 @@ The name specified for the new AutomationController custom resource must not mat
If your AutomationController customer resource matches an existing deployment, perform the following steps to resolve the issue.
.Procedure
+
. Delete the existing AutomationController and the associated postgres PVC:
+
-----
diff --git a/downstream/modules/platform/ref-assign-pods-to-nodes.adoc b/downstream/modules/platform/ref-assign-pods-to-nodes.adoc
index 9bdb82a788..cfc28cf2e6 100644
--- a/downstream/modules/platform/ref-assign-pods-to-nodes.adoc
+++ b/downstream/modules/platform/ref-assign-pods-to-nodes.adoc
@@ -30,7 +30,7 @@ For example, with the `maxSkew` parameter of this option set to `100`, this mean
So if there are three matching compute nodes and three pods, one pod will be assigned to each compute node.
This parameter helps prevent the control plane pods from competing for resources with each other.
-.Example of a custom configuration for constraining controller pods to specific nodes
+*Example of a custom configuration for constraining controller pods to specific nodes*
[options="nowrap" subs="+quotes,attributes"]
----
diff --git a/downstream/modules/platform/ref-resource-types.adoc b/downstream/modules/platform/ref-resource-types.adoc
index ee63b283fb..2b4ff3d0cc 100644
--- a/downstream/modules/platform/ref-resource-types.adoc
+++ b/downstream/modules/platform/ref-resource-types.adoc
@@ -30,7 +30,7 @@ For a particular resource, a pod resource request or limit is the sum of the res
== Resource units in Kubernetes
-.CPU resource units
+*CPU resource units*
Limits and requests for CPU resources are measured in CPU units.
In Kubernetes, one CPU unit is equal to one physical processor core, or one virtual core, depending on whether the node is a physical host or a virtual machine running inside a physical machine.
@@ -47,7 +47,7 @@ To specify CPU units less than 1.0 or 1000m you must use the milliCPU form.
For example, use 5m, not 0.005 CPU.
====
-.Memory resource units
+*Memory resource units*
Limits and requests for memory are measured in bytes.
You can express memory as a plain integer or as a fixed-point number using one of these quantity suffixes: E, P, T, G, M, k.
You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki.
@@ -61,7 +61,7 @@ For example, the following represent roughly the same value:
Pay attention to the case of the suffixes.
If you request 400m of memory, this is a request for 0.4 bytes, not 400 mebibytes (400Mi) or 400 megabytes (400M).
-.Example CPU and memory specification
+*Example CPU and memory specification*
The following cluster has enough free resources to schedule a task pod with a dedicated 100m CPU and 250Mi.
The cluster can also withstand bursts over that dedicated usage up to 2000m CPU and 2Gi memory.
diff --git a/downstream/modules/platform/ref-set-custom-pod-timeout.adoc b/downstream/modules/platform/ref-set-custom-pod-timeout.adoc
index 75ad2e90ef..9a110e2652 100644
--- a/downstream/modules/platform/ref-set-custom-pod-timeout.adoc
+++ b/downstream/modules/platform/ref-set-custom-pod-timeout.adoc
@@ -3,6 +3,7 @@
[id="ref-set-custom-pod-timeout_{context}"]
= Extra settings
+
With `extra_settings`, you can pass many custom settings by using the awx-operator.
The parameter `extra_settings` is appended to `/etc/tower/settings.py` and can be an alternative to the `extra_volumes` parameter.
@@ -12,7 +13,7 @@ The parameter `extra_settings` is appended to `/etc/tower/settings.py` and can b
| `extra_settings` | Extra settings | ‘’
|====
-.Example configuration of `extra_settings` parameter
+*Example configuration of `extra_settings` parameter*
[options="nowrap" subs="+quotes,attributes"]
----
@@ -28,7 +29,7 @@ The parameter `extra_settings` is appended to `/etc/tower/settings.py` and can b
value: "500"
----
-.Custom pod timeouts
+*Custom pod timeouts*
A container group job in {ControllerName} transitions to the `running` state just before you submit the pod to the Kubernetes API.
{ControllerNameStart} then expects the pod to enter the `Running` state before `AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT` seconds has elapsed.
From 1cb99c02d9375e3d39b27c23ac89505638205172 Mon Sep 17 00:00:00 2001
From: Aine Riordan <44700011+ariordan-redhat@users.noreply.github.com>
Date: Wed, 6 Aug 2025 11:43:01 +0100
Subject: [PATCH 35/71] AAP-44556 Add registry check in devtools doc (#4000)
(#4006)
---
...roc-devtools-setup-registry-redhat-io.adoc | 29 +++++++++----------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc b/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc
index bc6f46e694..abfa51767e 100644
--- a/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc
+++ b/downstream/modules/devtools/proc-devtools-setup-registry-redhat-io.adoc
@@ -8,9 +8,6 @@ All container images available through the Red Hat container catalog are hosted
`registry.redhat.io`.
The registry requires authentication for access to images.
-To use the `registry.redhat.io` registry, you must have a Red Hat login.
-This is the same account that you use to log in to the Red Hat Customer Portal (access.redhat.com) and manage your Red Hat subscriptions.
-
[NOTE]
====
If you are planning to install the {ToolsName} on a container inside {VSCode},
@@ -22,20 +19,26 @@ or the `devcontainer` to use as an execution environment,
you must log in from a terminal prompt within the `devcontainer` from a terminal inside {VSCode}.
====
-You can use the `podman login` or `docker login` commands with your credentials to access content on the registry.
+.Prerequisites
+
+* To use the `registry.redhat.io` registry, you must have a Red Hat login.
++
+This is the same account that you use to log in to the Red Hat Customer Portal (access.redhat.com) and manage your Red Hat subscriptions.
+
+.Procedure
-Podman::
+. Check whether you are already logged in to the `registry.redhat.io` registry:
+
----
-$ podman login registry.redhat.io
-Username: my__redhat_username
-Password: ***********
+$ podman login --get-login registry.redhat.io
----
-Docker::
++
+The command output displays your Red Hat login if you are logged in to `registry.redhat.io`.
+. If you are not logged in to `registry.redhat.io`, use the `podman login` command with your credentials to access content on the registry.
+
----
-$ docker login registry.redhat.io
-Username: my__redhat_username
+$ podman login registry.redhat.io
+Username: my_redhat_username
Password: ***********
----
@@ -43,8 +46,4 @@ For more information about Red Hat container registry authentication, see
link:https://access.redhat.com/RegistryAuthentication[Red Hat Container Registry Authentication]
on the Red Hat customer portal.
-// * If you are an organization administrator, you can create profiles for users in your organization and configure Red Hat customer portal access permissions for them.
-// Refer to link:https://access.redhat.com/start/learn:get-set-red-hat/resource/resources:create-and-manage-other-users[Create and manage other users] on the Red Hat customer portal for information.
-// * If you are a member of an organization, ask your administrator to create a Red Hat customer portal account for you.
-//Troubleshooting link:https://access.redhat.com/articles/3560571[Troubleshooting Authentication Issues with `registry.redhat.io`]
From 47daef8ebfc1afcbaa38b0d02d59230283705295 Mon Sep 17 00:00:00 2001
From: Aine Riordan <44700011+ariordan-redhat@users.noreply.github.com>
Date: Thu, 7 Aug 2025 08:53:24 +0100
Subject: [PATCH 36/71] AAP-44558 Devtools: Add info about VSCode workspaces
(#4007) (#4011)
---
.../devtools/proc-devtools-extension-settings.adoc | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/downstream/modules/devtools/proc-devtools-extension-settings.adoc b/downstream/modules/devtools/proc-devtools-extension-settings.adoc
index 60592de68b..5b48be6d9c 100644
--- a/downstream/modules/devtools/proc-devtools-extension-settings.adoc
+++ b/downstream/modules/devtools/proc-devtools-extension-settings.adoc
@@ -9,6 +9,7 @@ The Ansible extension supports multiple configuration options.
You can configure the settings for the extension on a user level, on a workspace level, or for a particular directory.
User-based settings are applied globally for any instance of VS Code that is opened.
+A {VSCode} workspace is a collection of one or more folders that you can open in a single {VSCode} window.
Workspace settings are stored within your workspace and only apply when the current workspace is opened.
It is useful to configure settings for your workspace for the following reasons:
@@ -18,15 +19,21 @@ you can customize your Ansible development environment for individual projects w
You can have different settings for a Python project, an Ansible project, and a C++ project, each optimized for the respective stack without the need to manually reconfigure settings each time you switch projects.
* If you include workspace settings when setting up version control for a project you want to share with your team, everyone uses the same configuration for that project.
+.Prerequisites
+
+* Open a workspace or folder, or create a new folder, in {VSCode} using the menu:File[Open Folder] menu.
+This is necessary because the file that stores settings preferences for workspaces is specific to a folder or workspace.
+
.Procedure
. Open the Ansible extension settings:
-.. Click the 'Extensions' icon in the activity bar.
+.. Click the image:vscode-extensions-icon.png[Extensions,15,15] *Extensions* icon in the activity bar.
.. Select the Ansible extension, and click the 'gear' icon and then *Extension Settings* to display the extension settings.
+
Alternatively, click menu:Code[Settings>Settings] to open the *Settings* page.
.. Enter `Ansible` in the search bar to display the settings for the extension.
. Select the *Workspace* tab to configure your settings for the current {VSCode} workspace.
+** If the *Workspace* tab is not displayed, open a folder or create a new folder using the menu:File[Open Folder] menu.
. The Ansible extension settings are pre-populated.
Modify the settings to suit your requirements:
** Check the menu:Ansible[Validation > Lint: Enabled] box to enable ansible-lint.
@@ -34,5 +41,8 @@ Modify the settings to suit your requirements:
** Specify the {ExecEnvShort} image you want to use in the *Ansible > Execution Environment: image* field.
** To use {LightspeedShortName}, check the *Ansible > Lightspeed: Enabled* box, and enter the URL for Lightspeed.
-The settings are documented on the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Ansible {VSCode} Extension by Red Hat page] in the VisualStudio marketplace documentation.
+[role="_additional-resources"]
+.Additional resources
+* For information about Ansible {VSCode} extension settings, see the link:https://marketplace.visualstudio.com/items?itemName=redhat.ansible[Ansible {VSCode} Extension by Red Hat page] in the Visual Studio Marketplace documentation.
+* For information about {VSCode} workspaces, see link:https://code.visualstudio.com/docs/editing/workspaces/workspaces[What is a {VSCode} workspace?] in the Visual Studio Code documentation.
From ba41e239ac935284bf8327be5211962a641a7108 Mon Sep 17 00:00:00 2001
From: Michelle McCausland <141345897+michellemacrh@users.noreply.github.com>
Date: Thu, 7 Aug 2025 11:42:55 +0100
Subject: [PATCH 37/71] Add External Redis support statement (#4013) (#4016)
External Redis is currently not supported for CONT and RPM topologies.
Clarify external Redis support for CONT and RPM topologies
Affects: `titles/topologies`
https://issues.redhat.com/browse/AAP-50975
---
downstream/modules/topologies/ref-rpm-b-env-a.adoc | 3 ++-
downstream/snippets/redis-colocation-containerized.adoc | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/downstream/modules/topologies/ref-rpm-b-env-a.adoc b/downstream/modules/topologies/ref-rpm-b-env-a.adoc
index 71e4194454..504c803bae 100644
--- a/downstream/modules/topologies/ref-rpm-b-env-a.adoc
+++ b/downstream/modules/topologies/ref-rpm-b-env-a.adoc
@@ -31,7 +31,8 @@ include::snippets/rpm-tested-vm-config.adoc[]
[NOTE]
====
-6 VMs are required for a Redis high availability (HA) compatible deployment. Redis can be colocated on each {PlatformNameShort} component VM except for {ControllerName}, execution nodes, or the PostgreSQL database.
+* 6 VMs are required for a Redis high availability (HA) compatible deployment. Redis can be colocated on each {PlatformNameShort} component VM except for {ControllerName}, execution nodes, or the PostgreSQL database.
+* External Redis is not supported for RPM-based deployments of {PlatformNameShort}.
====
== Tested system configurations
diff --git a/downstream/snippets/redis-colocation-containerized.adoc b/downstream/snippets/redis-colocation-containerized.adoc
index d4de91f5d0..c5f748107d 100644
--- a/downstream/snippets/redis-colocation-containerized.adoc
+++ b/downstream/snippets/redis-colocation-containerized.adoc
@@ -1,2 +1,3 @@
//This snippet details the colocation configuration for a containerized install of AAP - note that it can be colocated with controller.
* 6 VMs are required for a Redis high availability (HA) compatible deployment. When installing {PlatformNameShort} with the containerized installer, Redis can be colocated on any {PlatformNameShort} component VMs of your choice except for execution nodes or the PostgreSQL database. They might also be assigned VMs specifically for Redis use.
+* External Redis is not supported for containerized {PlatformNameShort}.
\ No newline at end of file
From a6682ea030aebe596800ed432759e5de6bf16524 Mon Sep 17 00:00:00 2001
From: EMcWhinn <122449381+EMcWhinn@users.noreply.github.com>
Date: Thu, 7 Aug 2025 11:46:56 +0100
Subject: [PATCH 38/71] Adding OAuth2 token creation chapter (#4012) (#4015)
Document External User OAuth2 Token Security Considerations
https://issues.redhat.com/browse/AAP-50313
Affects `central-auth`
---
...ssembly-gw-token-based-authentication.adoc | 22 ++++++++++++++++---
.../con-gw-manage-oauth2-external-users.adoc | 17 ++++++++++++++
.../proc-gw-enable-oauth2-external-users.adoc | 19 ++++++++++++++++
.../proc-gw-oauth2-security-controls.adoc | 21 ++++++++++++++++++
4 files changed, 76 insertions(+), 3 deletions(-)
create mode 100644 downstream/modules/platform/con-gw-manage-oauth2-external-users.adoc
create mode 100644 downstream/modules/platform/proc-gw-enable-oauth2-external-users.adoc
create mode 100644 downstream/modules/platform/proc-gw-oauth2-security-controls.adoc
diff --git a/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc b/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc
index 8df2ca1a6d..fb6b35a12b 100644
--- a/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc
+++ b/downstream/assemblies/platform/assembly-gw-token-based-authentication.adoc
@@ -25,24 +25,40 @@ You can customize this setting to meet your specific requirements by modifying t
OAUTH2_PROVIDER__ACCESS_TOKEN_EXPIRE_SECONDS = 31536000
----
-For more information on the `settings.py` file and how it can be used to configure aspects of the platform, see link:{URLAAPOperationsGuide}/aap-advanced-config#settings-py_advanced-config[`settings.py`] in {TitleAAPOperationsGuide}.
+For more information about the `settings.py` file and how it can be used to configure aspects of the platform, see link:{URLAAPOperationsGuide}/aap-advanced-config#settings-py_advanced-config[`settings.py`] in {TitleAAPOperationsGuide}.
-For more information on the OAuth2 specification, see link:https://datatracker.ietf.org/doc/html/rfc6749[The OAuth 2.0 Authorization Framework].
+For more information about the OAuth2 specification, see link:https://datatracker.ietf.org/doc/html/rfc6749[The OAuth 2.0 Authorization Framework].
-For more information on using the `manage` utility to create tokens, see xref:ref-controller-token-session-management[Token and session management].
+For more information about using the `manage` utility to create tokens, see xref:ref-controller-token-session-management[Token and session management].
include::assembly-controller-applications.adoc[leveloffset=+1]
+
include::platform/proc-controller-apps-create-tokens.adoc[leveloffset=+1]
+
include::platform/ref-controller-app-token-functions.adoc[leveloffset=+2]
+
include::platform/ref-controller-refresh-existing-token.adoc[leveloffset=+3]
+
include::platform/ref-controller-revoke-access-token.adoc[leveloffset=+3]
+
include::platform/ref-controller-token-session-management.adoc[leveloffset=+2]
+
include::platform/ref-controller-create-oauth2-token.adoc[leveloffset=+3]
+
include::platform/ref-controller-revoke-oauth2-token.adoc[leveloffset=+3]
+
include::platform/ref-controller-clear-tokens.adoc[leveloffset=+3]
+
//[emcwhinn - Temporarily hiding expire sessions module as it does not yet exist for gateway as per AAP-35735]
//include::platform/ref-controller-expire-sessions.adoc[leveloffset=+3]
+
include::platform/ref-controller-clear-sessions.adoc[leveloffset=+3]
+include::platform/con-gw-manage-oauth2-external-users.adoc[leveloffset=+1]
+
+include::platform/proc-gw-enable-oauth2-external-users.adoc[leveloffset=+2]
+
+include::platform/proc-gw-oauth2-security-controls.adoc[leveloffset=+2]
+
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
\ No newline at end of file
diff --git a/downstream/modules/platform/con-gw-manage-oauth2-external-users.adoc b/downstream/modules/platform/con-gw-manage-oauth2-external-users.adoc
new file mode 100644
index 0000000000..eb4315bf0a
--- /dev/null
+++ b/downstream/modules/platform/con-gw-manage-oauth2-external-users.adoc
@@ -0,0 +1,17 @@
+:_mod-docs-content-type: CONCEPT
+
+[id="gw-manage-oauth2-external-users"]
+
+= Manage OAuth2 token creation for external users
+
+{PlatformName} is designed with a default security posture that prevents users authenticated through external providers, such as LDAP, SAML, or SSO, from creating OAuth2 tokens for programmatic API access.
+When an external user tries to generate such a token, the following message appears:
+`403: Forbidden` error with the message: `(access_denied) OAuth2 Tokens cannot be created by users associated with an external authentication provider`.
+
+This default behavior is a deliberate security measure.
+{PlatformNameShort} prioritizes centralized control over token generation, which encourages administrators to select the appropriate method for enabling OAuth 2.0 user token generation for external authentication providers.
+
+It is important to understand that an OAuth2 token is created within {PlatformNameShort}, and {PlatformNameShort} itself manages its lifecycle, including its expiration.
+This lifecycle is independent of the user's session with their external Identity Provider (IdP).
+For example, if a user generates an {PlatformNameShort} token and their account is later disabled in the external IdP, the {PlatformNameShort} token remains valid until it expires or is manually revoked.
+Being aware of this interaction is crucial for a secure configuration, as it highlights the need for compensating controls if you enable token creation for external users.
diff --git a/downstream/modules/platform/proc-gw-enable-oauth2-external-users.adoc b/downstream/modules/platform/proc-gw-enable-oauth2-external-users.adoc
new file mode 100644
index 0000000000..5aa59ea50e
--- /dev/null
+++ b/downstream/modules/platform/proc-gw-enable-oauth2-external-users.adoc
@@ -0,0 +1,19 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="gw-enable-oauth2-external-users"]
+
+= Enabling OAuth2 token creation for external users
+
+To enable external users to create OAuth2 tokens, change the appropriate setting in your {PlatformNameShort} environment.
+Ensure the implementation of compensating security controls after enabling this setting.
+
+.Procedure
+
+. From the navigation panel, go to menu:{MenuAEAdminSettings}[Platform gateway].
+. Click btn:[Edit platform gateway settings].
+. Change the *Allow external users to create OAuth2 tokens* setting to *Enabled*.
+. Click btn:[Save platform gateway settings].
+
+.Next steps
+
+Implement the recommended security controls as described in _Implementing security controls for external user OAuth2 tokens_.
diff --git a/downstream/modules/platform/proc-gw-oauth2-security-controls.adoc b/downstream/modules/platform/proc-gw-oauth2-security-controls.adoc
new file mode 100644
index 0000000000..4c0ed96336
--- /dev/null
+++ b/downstream/modules/platform/proc-gw-oauth2-security-controls.adoc
@@ -0,0 +1,21 @@
+:_mod-docs-content-type: PROCEDURE
+
+[id="gw-oauth2-security-controls"]
+
+= Implementing security controls for external user OAuth2 tokens
+
+After enabling OAuth2 token creation for external users, implement the following compensating controls to keep a strong security posture.
+
+.Procedure
+
+* *Limit token lifetime*: Configure a shorter duration for OAuth2 tokens to reduce the window of exposure.
+** In your {PlatformNameShort} settings, adjust the `OAUTH2_ACCESS_TOKEN_EXPIRE_SECONDS` value.
+A value of 28800 (8 hours) is recommended, limiting token validity to a standard workday.
+* *Enforce strict role-based access control (RBAC)*: Grant users only the minimum necessary permissions.
+** Assign users who create tokens to *Teams* with highly restrictive roles.
+Avoid granting broad permissions that could lead to privilege escalation.
+* *Establish a clear offboarding process*: Integrate token revocation into your organizational offboarding procedures.
+Your HR and IT offboarding processes must include a step for an {PlatformNameShort} administrator to revoke all active tokens for a departing user.
+Tokens can be manually revoked from the user's profile under the *Tokens* tab.
+* *Audit and monitor*: Regularly review token-related activities for legitimacy in the *Activity Stream*.
+
From 7d69d0712c3fc49973def720f15b60e2f8b06def Mon Sep 17 00:00:00 2001
From: g-murray <147741787+g-murray@users.noreply.github.com>
Date: Thu, 7 Aug 2025 14:47:47 +0100
Subject: [PATCH 39/71] 2.6 AAP-49196 2.6 OCP updates (#4014) (#4019)
---
.../platform/assembly-aap-migration.adoc | 2 +-
.../assembly-install-aap-gateway.adoc | 2 +-
.../platform/assembly-operator-upgrade.adoc | 3 +-
downstream/images/AAP-2.6-channels.png | Bin 0 -> 151813 bytes
downstream/images/AAP-2.6-view.png | Bin 0 -> 117150 bytes
.../platform/con-ocp-supported-install.adoc | 2 +-
.../con-operator-channel-upgrade.adoc | 4 +--
.../con-operator-upgrade-overview.adoc | 6 ++--
.../proc-install-cli-aap-operator.adoc | 4 +--
.../proc-operator-deploy-central-config.adoc | 2 +-
.../platform/proc-operator-upgrade.adoc | 26 ++++++++++++++----
11 files changed, 34 insertions(+), 17 deletions(-)
create mode 100644 downstream/images/AAP-2.6-channels.png
create mode 100644 downstream/images/AAP-2.6-view.png
diff --git a/downstream/assemblies/platform/assembly-aap-migration.adoc b/downstream/assemblies/platform/assembly-aap-migration.adoc
index aa2f96cf94..4e602e5ad1 100644
--- a/downstream/assemblies/platform/assembly-aap-migration.adoc
+++ b/downstream/assemblies/platform/assembly-aap-migration.adoc
@@ -16,7 +16,7 @@ You can use the link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers
[NOTE]
====
-Upgrades of {EDAName} version 2.4 to 2.5 are not supported. Database migrations between {EDAName} 2.4 and {EDAName} 2.5 are not compatible.
+Upgrades of {EDAName} version 2.4 to {PlatformVers} are not supported. Database migrations between {EDAName} 2.4 and {EDAName} {PlatformVers} are not compatible.
====
//[gmurray 07/14/25 ]The following modules will need to be deprecated eventually, commenting out for now incase we need to roll back, I also need to confirm which are used in 2.4. Best thing would be to archive these when we cease supporting 2.4
diff --git a/downstream/assemblies/platform/assembly-install-aap-gateway.adoc b/downstream/assemblies/platform/assembly-install-aap-gateway.adoc
index aad2e2244d..da9855780e 100644
--- a/downstream/assemblies/platform/assembly-install-aap-gateway.adoc
+++ b/downstream/assemblies/platform/assembly-install-aap-gateway.adoc
@@ -22,7 +22,7 @@ If you have not installed {OperatorPlatformNameShort} see link:{BaseURL}/red_hat
[NOTE]
====
-{GatewayStart} is only available under {OperatorPlatformNameShort} version 2.5. Every component deployed under {OperatorPlatformNameShort} 2.5 defaults to version 2.5.
+{GatewayStart} is only available under {OperatorPlatformNameShort} version {PlatformVers}. Every component deployed under {OperatorPlatformNameShort} {PlatformVers} defaults to version {PlatformVers}.
====
If you have the {OperatorPlatformNameShort} and some or all of the {PlatformNameShort} components installed see link:{BaseURL}/red_hat_ansible_automation_platform/{PlatformVers}/html-single/installing_on_openshift_container_platform/index#operator-deploy-central-config_install-aap-gateway[Deploying the {Gateway} with existing {PlatformNameShort} components] for how to proceed.
diff --git a/downstream/assemblies/platform/assembly-operator-upgrade.adoc b/downstream/assemblies/platform/assembly-operator-upgrade.adoc
index 3703232a7c..732d45126a 100644
--- a/downstream/assemblies/platform/assembly-operator-upgrade.adoc
+++ b/downstream/assemblies/platform/assembly-operator-upgrade.adoc
@@ -25,7 +25,8 @@ include::platform/proc-operator-upgrade.adoc[leveloffset=+1]
include::platform/proc-operator-create_crs.adoc[leveloffset=+1]
-include::assembly-aap-post-upgrade.adoc[leveloffset=+1]
+//[gmurray] This module is not relevant for 2.6 docs, but still in use in 2.5.
+//include::assembly-aap-post-upgrade.adoc[leveloffset=+1]
ifdef::parent-context[:context: {parent-context}]
ifndef::parent-context[:!context:]
diff --git a/downstream/images/AAP-2.6-channels.png b/downstream/images/AAP-2.6-channels.png
new file mode 100644
index 0000000000000000000000000000000000000000..2452d31dc488a103b9e663fbf9b38bd258347124
GIT binary patch
literal 151813
zcmeFZ_dl2Y|2M7_rHl}g5JHkwA!P3)B&lR4MRxW`vJyfwa`zpYQh{xPH5?^X7efpQrJ9y^iPcd_Erc^*DmHG*u`mm?=m|NGR1*6?I5RNH61m
z&O6BP9crqm3H;wSM|m~f9r(v{hj|eGd!Lh%zSB8d3nv#7hpQylY;12{J?d!YaP_K<
zqou9W6ls|>e(5OjO9~EGO`NXV+OX?hzj>8J*YXy-kQlq%t!wNeLLw*Fg~TOJoRm0m
zf?e(myV^P3h8IU(l8~^Is42?nx;_~%xnTFC)opXLn}R=wvNNZsJo)J7WS6buv@HLZJdp7-7xMKvzj+rMvVyZBZ!
zSWlH~3T(JGvaxT8jNB3Vrtw?6PidC-2H1{QUWI`4FqrDT|r?!HU1;mLI4;XOC9bK6j3k
zDo}y3GLaVVE1yJjr-Gd$V#k|y{@GN5B&{8}`nv^UR=+V^
z@RcR;E@P`Orr~GUWtz)GNlE!@#+7<#mF)ulaCt&0MO&4N_S2Aq>htd^r`}3p!|&L(
z)vl=flSLlh`Q_`^ifU>*ucZ3i!#8#WR@xOB>>%^wJ$h8h%#8DMeSO8q9NYY~bc?yj
zzA95;d;~7qozu&nKU!Oxdp~ieYVoo&?fy72cQ8&vo|hhHOO}_uCzmx@i&ETiRKQqn=s#iCw+S@vg9O=-7HbA#}OSGyk?YU1t9ipy`Zq{o*|
zIz3+PQYJ%aXsG4Qn@TP&VyQZZv0$v9nPOsMW_moPOgueV=I7_d9fxj;+=ljATU$RXEZny_O}!~AdX<+g(N6q+boAb}g@Q8wI`ca^
z=~ca(>%9soP2a3mcNqUv-?eL(>*_>-NnZQ6pFaaa7zLA4QU=22Y5e8gT&3Rq;Ye)6
zLYZ8>x;-y1Pbj{@P9kA?bv;Nq1vgVQt@)Ro__TXTTX(mAyLHX@IKM96-Va4ZO|7kg
zlan?asj=OE8XC^r4-e;L4wcVtP0{C<{?q)ti@eU9mLX(2oA=Ve;o;$&!or=|tszbl
zp&HzN__91sQPxY16>n)dkHwYC)Ju8(eEY=2#P88jIXAcETnelBhJfeK4~>tH%ko8Y
zMAdz}wp&(;wRUMq+`_`bSR{^y!v9&z4PhanX)TWWK079xg8iClbpG;Nu#w69w#X+<
zmycj8k2^~~dh|%>V#D%w@n8y*%$_2cKyf94L*bJiI*K9TU%B*`XIxnfvl*kJqL>aGsBVZCt4n@d*(b|R^B_o1I#ES5
z{?O>|`1p9NY+Faioz4kmoL?)eBM;RN>GLySt4gs_J3BkyF6nmuJa7hkBT0*0Fv4j4
z&xM?JYgO(jnvm+3FJH=Dzkc|=fq+QdZd49@y(=ilefN%xM2Km3TYI}4YQwc_Jg;88
zD(|xkyIPE6ED*B`pR3SdQonSdSydp$uXjp?okmuiMH!z-oWH)YyY
z>iF^FvT4oT2j9gy4QTOhE9eaMlnyr*QmxTg{#)Lq5?CoW95KKZ-@w7N+Z5|EH#diq
z&U^T<+|{c?Q%_XSKG(r6B0dS;;LI7yD?emRQQy|QW!}AeM?w;;c<29Ht)il$_ulIg
z-plnyre(&XJ11t;v7T;x|BF8<(z5UtR+Bf5xlaq6M
z%5y{#M=3y&!|g|!@-h=W#dg#XgCb|j?2-j$QBlzcjU!3Ad`+F5AunEV>t48^z!&{-
zV5WT3>Eo!AsiPyEXnbgD5%!ufZl%v^flo-I&&FJtqhv$4u2f>&bd7~<-w44Hc=T{V$gx;3@BC)G`RQRHNtEA=9Y6xNvtwlS+8oM|l@QeCaoRqRH0
z?AS456ei2#mR44k&1YpoD=OrUi;4L)SoG#YeM(OCMJ@H
zRBw%`SMu?ZDV@)>O-f4IqLcpjlmCf4*&EldA3QAEZq1ONpI;R>{eEu#aIJz!+}#EX
zbY?#^VQl-y@$pl9Z*yX(UbNhpS#I=Uy?OH{Hio6Ob#aPv;6H!}RCtn#b${;O-DEhx
z0hyUcm)5;;0RJv7nz_0%ojG%6v4!W(9!u={l9G}t``5+AEY_l{TDbt45%ohvZ6)wQ
z16g>jAF3Q{c;LNz6j;KPvNE=zp&=FJPv}?Z@*3Q9*nGu?f>;&Y5fl#WrJy(kTU)^!
zH*Qenn_e+8s*OH!fo3<^LE7MN9UT;Y6+{o5pTG6K+s0IgoiF;A05a394q~{m
z_&_M5c+Hb~fFZg1ceQvGuUe)jEYu*2
zynUkal&Fy2Wr?Y`^>lS*GxZeGnv*_!SmA8_DjcwV+4JcS+b!<}{dQ3W0;>2ue$0$*
z8&us->T@msQgEmz3Z{YrY3V|Kwj3AjfjA4xT>a_UdO=R6-SQj}#>U1ZSmtjx@&mo4
z6C2T(Os%X!m%xUVoG_fo@ySVfftZ26qs>(--2*ems6VaQ
zMk+!~cQv>x=Ne_gb@`N@@Wjm>6%-6qEcAC3I?@Cy#y)=h5paW)#M0KbHvL?h)|D$>D%}io
zo+4o{r4y;~g}#Xw!`8s70?=R^zs$%WO>8V1D8+K4a(wyvm8k6`cKua5rRKgIGq*7v
zPSfC47*2cih~e-2ycd5=1J1EQp~H5-BCPC{D_4F`P6o&)iT|BLOIA+VgYyj#h(hK%
zQ_a}a)pcJ!i3E)w7zn37%Anv{qSvDz8tx^AfP9g!4oB4ydkp6sy;q(uy60F)0dQYT
zeZ+FWbyC`p>Q7HnbosO|U-lR*_F0|5N5>9C=j(GW+7nV8R6!ZvpbY4XPWtE1A7Tqf
zt1H!97mc^aZp5iV;lmg3(}RJs<>7e!ix&@;StV%&{rPj_iKNS<>_fa6jo8VI7774q
zU;tMsD$)2ePY;GF`>m|F^G1I@Ydu>TYY?l!jT0e4jw1c=(WB(#Lb%gr!Uk-(+K`4G$929f4lecBr
z*(VHRMFr7w$e-@7*)tn`!BCE!=C*-AE!|nEEpZwFz)nD0;t87D{N!+p9!5vYJ=aM}
zOCy&^pc08253?`qwt1bMJ%G{$fUT9T>pJ>8=JqM_OT!m;`^)1U(U)*8m{?ef1^piC
z^7$JJxf)9^tghoDV#_?JGk^K&)g8?=h4bfIrk?as_|F!7G2?uP&ig1Prq7`eJp;8t
zNL)Oqr^gVb*fZ5Ex7}Lz{Q2(IYYIs;z*@$~nR81^!|*BFCY-sV)E+*3NJB?_Zs4hx
zy01tDcPf_?o%Z_mou^Nqu6iNO92yyOSCva%NohNvCCU>SIeAk{i~pN9Z%DU(?X&yU
zAl7>qRUSXrOq8ZfYYs;9Xm4*P8SYf`RD}EJh*$cNw)I@I`dbSM5{ExCB7U
z8Q}M=fQ39~pH?xJPH*4g|B*9%bl!a?zITlb)eoq_Q<{xAbXY4lT7$dKPFzS@nkuh-
zcS_R^lyZcYnT5jZXZO_WeqQYrJ^(POD)=UE_+Md!BU7Yj=Jay02S3h;wyU_H?X_=
z%KiKIf3;jNDj6unX2us@j%{fXz-3{<*-o5=UqGOrM`n2^$*;I;PvuaOb@^Jh0iF=*JN4s=MUe-Un(iu=6M&f
z3~%g2nRa8F5zGj90yhvCSiq=scSyCv%c5bBS2Tk_j`{5CtA8q|s??cuenpU&K!Tb2S}035)X7M76#4^tmL&k&-t?S)>3(z$bV
zANz!u_wNt)tN~CUKsMmvhY#wz??EPjG=Q=LP=osQt-0B+qoYGoF|u)_^6OVB1_lNk
zeiJlIbW79{4Q`+lEki>p0Qv+?6$2wmGExF@ESc+kGV+=~kN#qre+e}dbp+Ugs0d(;
zhS@PY{-PeE!0x7|PDx22r41gu9ZTsi@AAbBdlIM^WxHZ{nESQfk#_zp5FszGQ~0DS
ztE<7h#p{N9DYg?E?z9saJ41+n7a)YfPbzS6ROjDgV+uEKPDGTQLuGTFX$v}<*hr3(
zq0f&a8H)4s_wQd853gEm@QwA}%>w``AWi_i^j~p9u$6ImOl@qaeAZiiLXTbh%0hD-
zAb$V;{qH@OMZ7oW5;bVc4tIj;qz-9ajTVDJ9
z%LO)l^Q}g;7s^)7dybwqk~cFO@ZElfJIWWLz+%~^7jVte`UYv^MU+i;n)07N8F2Qq
z3`_2HbX@X8Ev#Ce8=?NedNuDN-nV$6!^(BOLPD0(
zd{%fs%4p+^hvOk?05za~1AznSNv)3OE!BvX+7FVcL)`iJaqGrd*~YHU&Q1svpGM{e
z-)l&Gs2!;I{+;%&eBUWbKJWE0qk-9>dej4K^1&YA+Db!7S_>Nk$VSH+rTog)>dQQh
z^UHCZjvP5Mc*SYe&1CAp$G4*UATj_S_}cocc7hwS6$Yax1=J6H6!cj>T|EEuGWpZY
z^~TKR(YvTFZEZ{h|MZ-&GOD~LJr}2t$BVi=Q^~eL1Hfieuv$KY8iNvm@8%X4-_Q10
zA9LMU9)V6mZ7u%g%a>oFVm;-bKJ5tJm?~pyW8CsapPx98U0p02lT#bSK~rIF!i7JLq;o_#>5I`rdGSYZWK45Wi2&eDMC12_Dx2hAn)xa@b-RA3e=c!T3fH-uh?0l0
z%E!m|48*m`@|iB*L57gJs?Yinf&u~@tl^cG$NK@pDq71nZb8>naN(v4-UiNto%Q(f
zV=d5P@PO}zu{$pD(tliAlfw6RlaUgHD)o1%R@@3<66&wNVYUi19&?d)U&~IFLm+IxY*10Wyu66j0^|TXlwu9+wyDJj
z(z|fMBv&8r4HerDC0G`kBg9Zb9TPr&d>aWMTsagsR5I0-?&;}i--a+SR?GYj4`m7_
zg{R;h#TzSQ`i6$whh;R_#D+_;Rf+PGmGyCIku$MzK&)(;61(lG`=9h8Wed)T9XJR{
zP&8ud>(;9H2ftVx>z@$^POzK*a~>s@IOgUy?mtjbfnk2nyA#AEqXo?Vc4Z$l<_dEW
z6Np-4V`GF`q0b+noFZpyJ9WTOAy>cZ_itXJM&{?AK(hu-f;5$aD~I?3?JKBu0F?ql
z0N4aqq%%EOD*(fwJ`jdZ`ok{=z}~o{b^?xIYuv6g{|G3+&CP8r#033irew;Cy2+WdF}je3%JABdf<}x
zl{sya%fce3l|SVNO<>&ELCi{fV8-Qz^=5rTq(D(FgI
zpPmWI;ulYq1L|aq{7OFtS$=aG4
z2*j#Jig2vbb=4W#+Ah#Ziiq3+6M_iB8?6p?kdPL!0JufCq##8Ov}mx6_x1y1SlGUK
z_iz@o%a`|KIlF8YeYclFlY$WeWY^!%fu4Y)@F~w_?B`K*6*Q~3ml5RJU@O#$s52=|P1=xNj*E(#;%6}eS2z}>V(N*?2$uvUr>&lmgk~B(0{B@DAPTAqZm(To7w2Ve
z_=Uh{rk0jFy1Tp4Z>z6}82wKc0P+Xi6KpS{`l*o{8X5*b+>SeT4INc|(h)sK^NF}G
zU|jyCP{JqB(}PY2G>ZyFm_If)VLvVv83+*l+`VK&HMcr7*~Q7p81Dr#59(KKx(7!U
zMo4TzfudQbwb
z77xZg-Zg&a!>LoJ(4DbNRS!)z!%mnPH+}nd+h=3Mrv?=jrW6Xir*v1oI*M9*FR
zd_hP`%JZ3+Zjx3x)^U1cb*j=dw;VflVCGjPwT$A03yiQ@ZwRug}p(2wHska_6aLl6GrRtoBp4;eGI(*Vaa42o|SCZeM7C77CROUxOX><{~=1
z(}PiG2@AB8k&BuFj32re|Zi<2g7ZG
zQGV&trRg8%&YIv5ndZi_gr^@Foty;xl7sfElP*QmF2Wq@I+Eam9r{2sO&i*;%xa!Y
zCFDQAOnmg9SksjrVaXK;OX6ND$8o2gXpZ>K(eK#y`_G?`y9A4GH+l{q8+0i*u%Zfd
zG`gO?3U6|0Sgb6#IqfXnh}v+ENRuy01NI3));Kvi0lk!<6NASPIZRfPo08^O(
zze)T3`#1Z3xi7+JpGv6>r-}7?(%BTm$L0vN4rw>{^n^`$FBZWe3osDiS)}neaarR)
zq&|Pme%oSSkf`GIh0fm-6Ni*Z+B`PmE>%N8!TBTH@4UqI@dJ(fqnsbXo72m%N4Fw=
zS>*+-^Q~J+nVFRE09XR!PUrBrw%hy~~)vk0dVd_~?QTz@Q3!Be?LHhjWRN@{B{9rBAXD;hzv=DsG7Y5ajA3s=PM;QEwav>&~WWm{$58AB1#xK
z%@c9^tq|)7b$59v2J#FcBa6hTCUB4d#`gbd$pC=EDJC4Xg0l6SDA2TKf);BU4ZQIU
z1hi2*dNyMpD|S4I7qbOSg7(^nyLsw?#2Yp!
zhQLKqn`?7e%)zVgpk@NS0Iql_KLLls-f;Ku(ALunKqG+Yj4I$jxsOMvCYm3e$G|hH
z?(Wm4tOoDx`piypcpHF|6b7oCWI>ywV=SJTu_x#~d=4CC9?d80{=EhQF{uwg-RMKA
zNkinvgPElh&n9bqF%7jB>FVmLLP`S!03gGDW6dox*+t(;DXkfHJAoc3{QEyiNJgmM
zFdYT4Rn=*BAy+s=sODKkpPey+7JYV0vNtR3gE9eiRk`k916$kJ+yUY_
zFbDY(G*eRa9xg*dQWC;&PHyfN2*bGKh=@HfR@@4(rqn%zQ%(1CZ$#>s)h+
zJjzdYE1N`_>6G-(mX^Nn&rcJIBJS03X0#)&2#xWR=S3zXKqwu6=z%xDk^p#en@0
z{uA2MuEj+*bh1`W7w>l>aF1m~ci3jJ#QHO+TI}$fWQPRTKIv
ztwIQnIUE)6u-^05!j9#TFQEhie-Kz`7o)&4Y@r+ZE8k8&gD4GRh0ILgPZnWV7ZIvI
zOcpzgg@DNkQGq=GN^Xc^Zcah2bAij@kZO*v8QA7%;wb1Sbmki}uc_XorfvfgP*L`c
z<-G)w32q6(0g5*D_|7FAojU~AJN{T9`!aM)xI;beV@Lb##^DYHE2=Wjf`U;2T~jVRUCU2izhWdz6uibF6fv)v3
z_Bt0GKp29|&o^j2Me!jxosE|Z$P4S?S6d$JCbyVF%_WqDI1Sb%7P|A{$|-8(vhQks?8JDkS@SkOZNOn~DuX$cO
z>0$qHW~?*nDfFQJ4u()=DD+gGS%^DC)ft04VhvMYzSrU;w21Lf~i~tZqnDw1j{Hlpe|W40>kKRA~tO@ZP!i8h=O^SRMf}
zUP<^E247y5ixwnwM`R)7nS=lVo
zsE$S6KX==#MS->f)l`2tVZV+#t>~8u(o-Njgj8i>qs)U39%9FtDd9H?gB+p1y|J(O
zyLa`a&|H)*-ysYBCd$2Xn^%q5iz;q2Hc!t0o&>5U1f$0?lHd~Pi{7kGC5grzvIl+gzpFK7>as?
zER!)j1#Z07Ki%yJdq
z_PrK{U;f38C}2;(c>&E9cbnx>i^xMTF0C2w2ZWIN>B*>EGnflnDQ&lj&X!(0j~izf;J5E51FaDE-b%l_@VK!Wfws?hqCeBhbAjjC>Y=?_zd44Jgw}b
z(GDONa=9k5&%sbbl9c{;p=j6h!9o(}ULdk1wMU5^VdRGJ(
zflFrk0@*%Vi$>M_{vzoY+;yzUgS&du2K=HXP?gTawQ^z)W_^_l`Qjh)o{AH3S
z@A9Obcu>aE1`EHI8>d8QXLuQ2-M7SDTHUpt8x#2*5a8NVHzJ3Xk&$5@cpTe}{vhlr
zm{h3t&{Tl2k#kbui0HEKWjYqoyWOUjroA_2M9$IUxcg>F5#yK1jS0#OnL7QrztzB_AB40%}Sg0N4Z9Y9MXE4>-
z7;^pYsXmUZem5qT`7Bcof7rqA-RJpwJm<~;NFkSzQ8^VS*1rKw-93#3i#~M0ibP<*`0g#iZCfiM^w#JphGzP5_gsebb>P4OrhWSk
zYKKE-K~_XX;N2u**_A{=bm{eCgp&xJ0BWV{pY${$BZs}jOCNMD7E(dzela9Z**<@K
zk(zq9`2Ca~+k$;qVej=Bw#^wfnK#>y{tI@?{G?|~0y2s)cKFRbZmTnKhL?gB`=2%)eR0rAG=BR6gIPgq>nXli
z)y@=J1K`}$=AGNN(iV2uQihcTedEcD`DnQL8u-Zo8i}K$BlbE7dIiEq0E2SOlPl}q
zU~2G0h~ytb$Pk?p$G(NehPKnAtDi5F-XU0BYE9Sb*yl+^-iaWvuWl~@nJU*->BLP{
zF~eBP21w?_or0DGtsN00!tz6!4%sP$?l|Woo12?qhEzgI;oQGiiy&6FFq_ePyDfx_
zp?P;`oi#AsECq)}5QinQehAttA@%{|K)TLVo*W(ZH4rG9>Mlig0KO-%F!Nrvz@s;$
z6M443LaQgSgE|8)u?5KiQ~+QUTx06eJ8&Uj0fCsYwj1gTDLU!TbkZT(Y{zXY>JHQ8
zn-4b3(hxj?gsLwn@XMAPpN3}1#aA|otk;UqUT`{zTNGP;f0foWj>lS{RRpdWOW
ztE9hqnkzD}gI6R%l_rYLBKHUaQs>VrRRL9S?{ehc{y;6)L?<|w?7f+p87ii}kx?MB
zGRTA}r<`Ob!)2iWRGH40HARgMl#l=PkYYvQM!kjz2(y7LelP4hWhh@4f1z+$+uE}I
zPI9J4*TtbgHRUSNf39;%B;FYj3v6HbuK2vklhkxIaScGseRd=sa|~vppg7RTplk#!
zH$WdmC}8q>hn*O(IiV%aEh9B|2l**t*+4sxXk%~a8FDy@-kZMXUbQLwjl`v5Xk*aZ
zAxzne9*=KeVrG_j(PFI~7SV0cgz;o1=XHgfXR$iXM!t@mHU
zxCG*dcvW1AeA2fx9qSoaDL6l*dk$Mvjt_holHRQ#4BK$V)w+sd;M32}&eQJSuphzx
zl5zjQ*C6U5Af*z}7H5&F5mkv62jWTODt>otJcHDUs-4;dhbj=kj5^?uk*gsIci1&7
z^?pmV`u0Kt@7CW8bZ0*FGI~fS1_DUIsjaE0xm|Lg56}bk#AD&NX;n_|6we7ZXih&4
zqf)WiahppE#?({qy9Kd7M$eBfPIlzMi9;PinNSOI315B5q&~#?o;7J%GZ=HZsTGxh}z&B
zp@bkPaul9q(tdhKgkOk>0)6J+BG(r*GOuEa1>UdF@#9YALU)4sLZF^6dwNb+kW+i|
zQ0M|0PeRFJ#Sr8f-7Lcx&s;=1(6zdbAp@51_8%V=64!
zFG8FnQnMh_EQPYB98AcS5CYE(q_W)@=-3S1Tx;B{!U0Qe`c29vXePF*#_}L69-Qev
zzXX9269hyySIaiB_dLy+ukbTeVhcsILdj9cXz3qs&x2?L1pVG&hyrP(oT4I+)&rv4
z6pm#fUFIUeDBNq)fihLZkcw4nR-e)4ypcU@PUQ8vnzvPerX!0dm9v_T04#(rEGK#i
zYo~N!vI27N4*}e43k6K7f
z%%ye8MIu3E<53*%-eW#8K@H+H7wtE5k;xKveSZMEZEmCy(>o8`7Gb=_K7Cp`=5Z*h
z&NJ1Ge)Y)_BlOie|r*L^)gXJYzTGAlb9KlW{~pi?Oyuu~uSH(r(+E0Nep2(3^C
zQA*7hGm)s>Z;OS^%*+fNX63>t4+uFj91-3Es7_GjAyO$|01U<7UZoR=|MO=G#F|tS
z<2zyWqi8L4jYzA!9x}^if>%y~kUNY+^#1Sne-1h9IY*2IKxUO%|J%G6-BUL*r;Sy|
zZ=lEFDhPi&zQGiy0-_OyN&Jy(Soeg0FMRPLjH-}rHGHL*Jc0`b8MErZS7RY#L>Y0n
z#B6&MQK+FUjoyr|Ge;&Kl1KZRXMIpD4KsmQGCA>16qno-?IwVMzxAgov<
zBak3XY5D~X4&a0wSooP_6G
z8&acJ7{Y7q;qCzsLvV~z&e1s43h9GLSwXAB(n7$%5gxz@TWzF>a~aSCA=~JO1y-66
zhZWufj8xoPt_BEO13xn_W00cE)@&}(*%(^EcjhWl~{8S
z_8>PZqyTE`qIif_n%2A%Ef?`FO#Y!-Jo_&pfpBYTViP>JJ?4SyddL#1DO|$lKn=qk
zh*16x-v)dKZ4hM&QUl>^O0hOZjMR_JsU4vTKxZZ#YZx$!LBf=UNBH98a+QWw;XgN
zqutv3*??;-7!qL6FD!K`b!uf_xuEyVF1$@jK>;HQCKqXSpWOr9PQ+%jvIKx4@BKac
zAH0ojGexvKq?)jA&V05id8nZS$3#=Ov5=Iep={^~y{+uxMF-!=#1e+6z31Bw7RK&UE5TkO4_E`XF&nLxyP0Dug!fC?s4C
z?s^W80G$r|A3zgIJPtd8JkWII5w|ZXIe=8RB8(-qeZuh#u1MIxliGon0u2eRI7(N2
zTxkenZgvwLCm31M;0j}a4WaBncmSa+@4MhwDv4sMaT
z$WGuT6fyLip2fLE!VX3sP!?wiK&FUNJI*2pJRT4oZRk+zBp47;BYq@=q_4&$BpjN*
zfq9Zzh*Ry>kng~7iqo#|N^De7Q6Vq1xzBzJ^PR3z_zA3GDC?;aOjrz?(Q&j
z?;~KN@)E4w@b1md7!BvkMb`L_P;CM3}LIEsj1|5puE$MyeoLL6FA^U#6}hC4O-g$e%DFnjS^u5d#H4-XI80nD*FWnE1F0wRod0`cb}|58|3$eB{!=D~xi
zgsj9fG&&gW5FAi|Aca+hzQKeB4!X{1E{^}n0uZ?rq}%Zk5KTkR#Bl7VVS}@0sZo}I
z&hfe8PGj^8sW0@Cok?1*y{5ZJaNvp2hv49y-)sMrEcS4g?F>~`5WNedgcx%DFDC~F
z0;LiCyK8Z=n^4YEO-eMlIh#&fq?|_13cCP}_fuZe3az9l!p>Q**pc?MYRzn9{q~{%@S`CBIkgQt7((>Eg-N
zFD{jfw|@v+G&?-AN&cH-#;#{BYfgGZYUE@fHCxThKy9_9ufh7wb$4sb5mgZeJ}N2G
zZnBx=BiS+Bf=`Zy8)Tln7&&sVZd3_`#&xoA1d_g5qEKFUa2*g3nA7+`SS3dym7fq~Y{TRe=X4l{%xvD5bUIvsIGQL=lcSHUOW
zS-0O&2&5t!Dx~72mTd2QlSCuW8KrH06`iHUpR3P1Pj+Ek(3c*Lk^$^ZN+ZKHV}hmK+j#*V2CQ&i@uGWM~r|$?&xo~8~n8AiIkg;r)Cc|((NG0
z8fVY;Y6to~`Opq$4PlPlvazWqjC!d>{#snkHLIm6Ug{4KdlL9ui#Nw-)0;j-2@w9C
z`V-{fYQci4=PcG;T7Uc~ADi#7dkAsBq_@^pLct$F33f4Y3Ilr7EL2B(k~su
zI1#<67I_yi7fsMmOl3)L@NYg+5b;Zx&kPHf*cx>(R}*wF#}jAVB`CI-+&>E~_?8WuPd?gsWjXbC&v-osCg$+BcWhyF~p
z<&t9N2`c$}*O_Gf(S{E@ZR-
zGeOKI(k+jUUe0lu>Y*(87Rgm%c|18IgCOUymGL(phw5XLl-}y3bA47stq&P9>3nz7
z^gwJQjFHL^!O|e;KiDOJg2*|C$kRgL!<+?x0=g}M_`p$ZC475N{JprMwp=rE
zEHJD7_K)*}9#*Rh6aB1FNAv{*1n!M}MrNQk>;UeJY2EGkP^Qe*j*ffTo;MWF
zs!h4noC91YX;QAPha-9*4(|}>OXEc?_(GCs{
z-b%Ra@OT1~1*Dj%L#${3M2}$U5E#(gt7qvUh#`P2UuaMR0ar*y#;0?priG{sVcrpGze*ErV@1PeC62BOyaHr
zETQ{~#C^H7i?+jB6wn~#p0>lE9}U><0G8H0#TPxl4mot&`M|J&E)O@6FyR3Hv_I-o
z>2!2{j0H#_di
zvgagqtt5!VX%J655n@sV_#-a))TvNBRzsWiQl=i#q#==u;KkU^XzuSvC5}U>K)t{z
zK(f}lSsBHMc+=v{4{dyVxFw6Fe
z!`;OkZ?>c@tY^?!kHtx))2DZRO_T{oLqQ@B4_i2YpQ_%s>cyFOqy{LVWO}z@sEp`)
zw!Lr(?f{@soAZBlZoa^z8l9D)r!UU!quK9NA_0WO+(0vggObk=EI`XmdL)IysXa3$yv
z$nshM2i@#D;v%s#P^PTt%C96Fj)EzIbcx{@=Jc4#-)ZsEN5kv
zp&H{Tkx+0d5IQ1(WD(iH)`4cm!?3^R5=~E67yrF*3}!2J)jFuutQ4MexbeDoiZSsD
z)?6TlZtohfK9R6!u)uJG@QD)`Tykw*c7})bfNLj4jBt}Mv9kZ`1rLwa0OQM(zhB{P
zA=BU}h|(WvLgjK#02IUmLKPCFUo713u=2L$#@@!;$o$|zDe_LTpRUh>+;h;pYqvI)
ze%xhW02l!60Bn{B3%xl7NiSaXCW=5U$RS_eFoj8X6I6n
z?hj}@%CTatHbkC(XXwgyN#m3vM9e}sNYbR>m4uiMT6>B&79{WQvMt}O3`S-SWe!V@|yC1#ETr#QugMG
z;D2&L(3rZ-K>n&bk0AZ*Ek2{ovBe9KTurRYN*AeO2bIihPpBL6(u;Oon02XNyzRN!
zYyCr6x-3pZc^j3>if5z>r_F-r2fDH514CmPJ><&o)m+pLawiK(*HybDU
zdShm%y3%sJdxz#qH$Gh1?7THGr)V>>@=IgVuCV14$A#Mu&)Mf+ng|$RXOmp+U2!bh
za68=iJbG%c+rjbq)=L}_G^cGat9Cg{@US4iU~d^;^o>f|{fo0-q8P2dj?9U5_s);V
zYjzu!O^x|94?22J%(+d^$X$Uts5aD9k3`lK}XL(r8oYM{+^GYO{V9FP+o6`9Qa0y_hxY&?ay1
z*UX;_NP~#6gs#1<9rm6okhVLU6OWc51k-d)zQ0d?52_vsgzKki_V^hhU?W+uaGxPg~}@Z3a4~
z-nU58H2KY3+hg6delX1pa!;=Ka}@7GVME&aKpCD$(Zcb^287Qgc4aUoi1^3me#Os`
zPe`QOjeb8wxQb3r&k!j_!G*&IhLv2k0m??q0X21UPcaofnL)A)!Ja<w#`-Frhp;`Q`lGtCBJqz)Fs|-y@$HxD&(XKb@$fPv8Y5J>6zeP@
zo}m07eS|qeS$2}5Ztt^W?`gUT;cUDke-|olu%bLa*ZRft7Aa>{@LVaH9lwqkEmNR%
z_~zgGIOx!Y*H2XOVHmUicTIcN3;P~y|M#!j+lAUV{{8ccgzWOa52CoY+&F}|prDsq
znf~`5oG0-+`tP@rDU5TJ{{5Qnmfi3E{Z~JBd&b20!@T9leN0Byp*MDedk3>4!u)8UZ9n{Fq(syPwT>(ijZt^V0t}
z{_3XjI{>c)g%2U3A2;UxY^e6^X&UJvrGAIEfsBpx6Y=zt#Z|FwM*$Qdwf#3Jh3(p}
zlxHA7%)sN3JMb1DbdV7K9cH^RKm*h>`KU9zb^u4?E*yf*NnMEq;#m?PkWg~51*ew9
zyENflh{jg~c@hx{x4GeaH^=1SV`F9U$R9-X5#@iVK6EP)Pk#dc0LaAji%(Mp4+(TD
zyv;MXE09L{S{iJ-F*xc_c;BILt&rl&;MZ{YM2Oyn*OI0*EfwyNz_lQ|M@)g^;c(D6
zD@UDrnq82@Ai+xY8$HKEvw&4lI}oW)hVF|A7fiMwrE8k&X_IBd!Ld@Gx>i6uEX?Bp
zCDM2VpIn_P3-4VU79&h(FtVj7fT*S4DdE$^G$t%kkA?9qKMl8`>kf~M%uE(d{hKkt
zd?D_Y{qJuF5obv<5Fnt(qfQx!9~%&9_M-v;!g!Z$hILBgqspw0o<)?Mn6Lvkkg1M%
zpO1tLrh(S_8O!iQ#E+$3t9#tc4;2Ns
zi#jzkG5Ug$*WR*~mTUr1Y#x2C`a7;a(%%_mF{*#D$
zp_oB#!iW(u`>V@Gh+SCRA%}hXyLym}_a1xyeT5~NJXyI(f~+skr95HoUb+Q(0MsYoZ*PWfzlfX9yamX(>-
zLYLr3&>RT&S>yaqbI!5tmBvoP^g8x0%Si{qp#VBXf^e~fk|@Rg0^;xav3eja781zlHR>raNHBr
zdls+i19uw{pB%H=uzSx(K6BL5?%~Z>(|@ee^op}
z?P1ytI)jiQnl)&jFQ@~aJ%f4ms;av?f(Q>4b{U!$&>XZhJo*H&8B8q
z^=(d42e1nK_n^lihh;}$p1{fbTxxi%2CeVU(Bai4zcih6(1aNmpG6leIrPzFJVq|>
zAf6xr69}ncC6PGKZw-eyI6hpXR&qL+MvFWev8F6kM0Oq~A%r*F