{"version":1,"nodes":[{"version":"4.18.26","payload":"quay.io/openshift-release-dev/ocp-release@sha256:dcd5fce7701d1e568ffb1065800a4aa34c911910400209224e702b951412171d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.]([1-3]?[0-9])|18[.](1?[0-9]|2[0-3]))[+].*$|^4[.](17[.].*|18[.](1?[0-9]|2[0-2]))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:dcd5fce7701d1e568ffb1065800a4aa34c911910400209224e702b951412171d","url":"https://access.redhat.com/errata/RHSA-2025:17657"}},{"version":"4.19.4","payload":"quay.io/openshift-release-dev/ocp-release@sha256:8153a8c010b292c0c4ca7d8b4ca13ebeb634d449982c66568764511c736281b8","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|4.18[.].*|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:8153a8c010b292c0c4ca7d8b4ca13ebeb634d449982c66568764511c736281b8","url":"https://access.redhat.com/errata/RHSA-2025:10771"}},{"version":"4.19.17","payload":"quay.io/openshift-release-dev/ocp-release@sha256:5c01281c55d75a1569440f91d2708125f14533c675b96d7be67b7a1badd759e5","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]18[.].*|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:5c01281c55d75a1569440f91d2708125f14533c675b96d7be67b7a1badd759e5","url":"https://access.redhat.com/errata/RHSA-2025:18233"}},{"version":"4.18.9","payload":"quay.io/openshift-release-dev/ocp-release@sha256:720f
89718effd16de7d77e5533c9608f1845295a2e00dfff543d0cf9aa09b2a0","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:720f89718effd16de7d77e5533c9608f1845295a2e00dfff543d0cf9aa09b2a0","url":"https://access.redhat.com/errata/RHSA-2025:3775"}},{"version":"4.18.16","payload":"quay.io/openshift-release-dev/ocp-release@sha256:0dac222584991f89a123d85e8c3055f0056e5876fc209b8d4bea7a59e7504d59","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:0dac222584991f89a123d85e8c3055f0056e5876fc209b8d4bea7a59e7504d59","url":"https://access.redhat.com/errata/RHSA-2025:8284"}},{"version":"4.20.21","payload":"quay.io/openshift-release-dev/ocp-release@sha256:54c81ab130a264829c9a3434df1005074cc9b2edc8e31ead40ac94faf3debf72","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:54c81ab130a264829c9a3434df1005074cc9b2edc8e31ead40ac94faf3debf72","url":"https://access.redhat.com/errata/RHBA-2026:13863"}},{"version":"4.19.19","payload":"quay.io/openshift-release-dev/ocp-release@sha256:dedfa946ff3535dfe5b3f682306d533f00aa07cbe237d6df472dfb6fb88f5bb4","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.]
(18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:dedfa946ff3535dfe5b3f682306d533f00aa07cbe237d6df472dfb6fb88f5bb4","url":"https://access.redhat.com/errata/RHBA-2025:21363"}},{"version":"4.18.27","payload":"quay.io/openshift-release-dev/ocp-release@sha256:4686c8d26194f890c2a241271d41a762d4be26af0be60e9cfd0c563f61b3beab","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:4686c8d26194f890c2a241271d41a762d4be26af0be60e9cfd0c563f61b3beab","url":"https://access.redhat.com/errata/RHSA-2025:19047"}},{"version":"4.19.5","payload":"quay.io/openshift-release-dev/ocp-release@sha256:bc79be35e8b8a3719a3e16c91b64e5945c6c4ff1a9c9d0816339f14e2b004385","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:bc79be35e8b8a3719a3e16c91b64e5945c6c4ff1a9c9d0816339f14e2b004385","url":"https://access.redhat.com/errata/RHSA-2025:11363"}},{"version":"4.20.8","payload":"quay.io/openshift-release-dev/ocp-release@sha256:91606a5f04331ed3293f71034d4f480e38645560534805fe5a821e6b64a3f203","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:91606a5f04331ed3293f71034d4f480e3
8645560534805fe5a821e6b64a3f203","url":"https://access.redhat.com/errata/RHBA-2025:23103"}},{"version":"4.20.3","payload":"quay.io/openshift-release-dev/ocp-release@sha256:24da924c84a1dfa28525f85525356cf1ac4fbe23faec7c66d1890e0b3bcba7a0","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:24da924c84a1dfa28525f85525356cf1ac4fbe23faec7c66d1890e0b3bcba7a0","url":"https://access.redhat.com/errata/RHSA-2025:19890"}},{"version":"4.18.17","payload":"quay.io/openshift-release-dev/ocp-release@sha256:9d24a8cdd67b8f18c99547d5910e4863e7aab5bd888e26670a00dbda0a9d4687","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:9d24a8cdd67b8f18c99547d5910e4863e7aab5bd888e26670a00dbda0a9d4687","url":"https://access.redhat.com/errata/RHSA-2025:8560"}},{"version":"4.19.1","payload":"quay.io/openshift-release-dev/ocp-release@sha256:4d7f10e383deb0c5402f871bf66ebdcad6bb670cb3cf1668bfec5166c56f3196","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.]18[.]1[6-8][+].*$|4.18[.].*|4.18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:4d7f10e383deb0c5402f871bf66ebdcad6bb670cb3cf1668bfec5166c56f3196","url":"https://access.redhat.com/errata/RHSA-2025:9278"}},{"version":"4.20.5","payload":"qua
y.io/openshift-release-dev/ocp-release@sha256:c1568bf00f149d16b4cbe5cd8aedf3bef110c1460a91f81688aca8e338806a2c","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:c1568bf00f149d16b4cbe5cd8aedf3bef110c1460a91f81688aca8e338806a2c","url":"https://access.redhat.com/errata/RHBA-2025:21811"}},{"version":"4.19.6","payload":"quay.io/openshift-release-dev/ocp-release@sha256:02ec914b5380b9e4e048b830c9521e8d11f7f613d4ff3977147107770288a595","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:02ec914b5380b9e4e048b830c9521e8d11f7f613d4ff3977147107770288a595","url":"https://access.redhat.com/errata/RHSA-2025:11673"}},{"version":"4.18.28","payload":"quay.io/openshift-release-dev/ocp-release@sha256:98c80d92a2ef8d44ee625b229b77b7bfdb1b06cbfe0d4df9e2ca2cba904467f7","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:98c80d92a2ef8d44ee625b229b77b7bfdb1b06cbfe0d4df9e2ca2cba904467f7","url":"https://access.redhat.com/errata/RHBA-2025:19865"}},{"version":"4.19.20","payload":"quay.io/openshift-release-dev/ocp-release@sha256:90a6c7e4e570bd1914578a2b5b6c9847a2b877c466e29c97e75f28de73228a3b","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*|.*","io.openshift.upgrades.graph.release.
channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:90a6c7e4e570bd1914578a2b5b6c9847a2b877c466e29c97e75f28de73228a3b","url":"https://access.redhat.com/errata/RHBA-2025:22278"}},{"version":"4.20.4","payload":"quay.io/openshift-release-dev/ocp-release@sha256:5b87a665045cdfe0a1b271024be936a0c46de17b25a112d6a136c5af89d861c4","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:5b87a665045cdfe0a1b271024be936a0c46de17b25a112d6a136c5af89d861c4","url":"https://access.redhat.com/errata/RHBA-2025:21228"}},{"version":"4.19.7","payload":"quay.io/openshift-release-dev/ocp-release@sha256:bd4cd954feebfe3a6b2847c20271e8f3ba21e99ac1e234db6ce4cf2207f8955a","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.](18[.].*|19[.][0-6])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:bd4cd954feebfe3a6b2847c20271e8f3ba21e99ac1e234db6ce4cf2207f8955a","url":"https://access.redhat.com/errata/RHSA-2025:12341"}},{"version":"4.18.4","payload":"quay.io/openshift-release-dev/ocp-release@sha256:61dffd292f6689a3381dd05f7845dcd5d27c099fce2f460aa03d760d535f81e6","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|4[.](17[.](1[01]|0-.*|[0-9])|18.0-(ec[.].*|rc[.][0-3]))|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,can
didate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:61dffd292f6689a3381dd05f7845dcd5d27c099fce2f460aa03d760d535f81e6","url":"https://access.redhat.com/errata/RHSA-2025:2449"}},{"version":"4.18.18","payload":"quay.io/openshift-release-dev/ocp-release@sha256:eca2e3f7de2bd92b18f69547c8f0ad842fdb83f0821f76b8692f2716a86b0bde","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:eca2e3f7de2bd92b18f69547c8f0ad842fdb83f0821f76b8692f2716a86b0bde","url":"https://access.redhat.com/errata/RHSA-2025:9269"}},{"version":"4.19.18","payload":"quay.io/openshift-release-dev/ocp-release@sha256:3fb2c0faf6cc35dae23fd9fb4182c89df3e7c5272505652c7e6dced31c416daf","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:3fb2c0faf6cc35dae23fd9fb4182c89df3e7c5272505652c7e6dced31c416daf","url":"https://access.redhat.com/errata/RHBA-2025:19301"}},{"version":"4.19.21","payload":"quay.io/openshift-release-dev/ocp-release@sha256:7c2001c24aa550aa228cd2d0fc0b5d9ac6656cd4267cd7c156ec758d0687758e","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:7c2001c24aa550aa228cd2d0fc0b5d9ac6656cd4267cd7c156ec758d0687758e","url":"https://acc
ess.redhat.com/errata/RHBA-2025:22786"}},{"version":"4.20.12","payload":"quay.io/openshift-release-dev/ocp-release@sha256:c9bae4933c711d664e15da5b98e6e057fda51697aef4f3ec8e932922aa969373","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:c9bae4933c711d664e15da5b98e6e057fda51697aef4f3ec8e932922aa969373","url":"https://access.redhat.com/errata/RHSA-2026:1000"}},{"version":"4.18.29","payload":"quay.io/openshift-release-dev/ocp-release@sha256:8c885ea0b3c5124989f0a9b93eba98eb9fca6bbd0262772d85d90bf713a4d572","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:8c885ea0b3c5124989f0a9b93eba98eb9fca6bbd0262772d85d90bf713a4d572","url":"https://access.redhat.com/errata/RHBA-2025:21797"}},{"version":"4.20.16","payload":"quay.io/openshift-release-dev/ocp-release@sha256:5e2fb7977a82237e497443e2bb53fd1c196e083fc5095294699399b61ce02746","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:5e2fb7977a82237e497443e2bb53fd1c196e083fc5095294699399b61ce02746","url":"https://access.redhat.com/errata/RHSA-2026:3855"}},{"version":"4.20.1","payload":"quay.io/openshift-release-dev/ocp-release@sha256:cbde13fe6ed4db88796be201fbdb2bbb63df5763ae038a9eb20bc793d5740416","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candid
ate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:cbde13fe6ed4db88796be201fbdb2bbb63df5763ae038a9eb20bc793d5740416","url":"https://access.redhat.com/errata/RHSA-2025:19003"}},{"version":"4.18.36","payload":"quay.io/openshift-release-dev/ocp-release@sha256:7486ca2ec3bce0ee41dd2c03d75e120b6f660929ea50240463937ae1c4b118f7","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:7486ca2ec3bce0ee41dd2c03d75e120b6f660929ea50240463937ae1c4b118f7","url":"https://access.redhat.com/errata/RHSA-2026:5133"}},{"version":"4.18.19","payload":"quay.io/openshift-release-dev/ocp-release@sha256:e6d80b9ab85b17b47e90cb8de1b9ad0e3fe457780148629d329d532ef902d222","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:e6d80b9ab85b17b47e90cb8de1b9ad0e3fe457780148629d329d532ef902d222","url":"https://access.redhat.com/errata/RHSA-2025:9725"}},{"version":"4.20.17","payload":"quay.io/openshift-release-dev/ocp-release@sha256:e0e62ca0bf43f7e9c18551fb8907c882c60cfa0c76392d3a8115a628da02d693","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:e0e62ca0bf43f7e9c18551fb8907c882c60cfa0c76392d3a8115a628da02d693","url":"https://access.redhat.com/errata/RHSA-2026:5142"}},{"version":"4.19.23","payload":"quay.io/openshift-release-dev/ocp-release@sha256:7bf7c5dabc70518b89130ff7cfe62d14a61ed80
0adb418359bd9dcaa17b50206","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:7bf7c5dabc70518b89130ff7cfe62d14a61ed800adb418359bd9dcaa17b50206","url":"https://access.redhat.com/errata/RHSA-2026:1552"}},{"version":"4.19.9","payload":"quay.io/openshift-release-dev/ocp-release@sha256:b6f3a6e7cab0bb6e2590f6e6612a3edec75e3b28d32a4e55325bdeeb7d836662","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.].*|19[.][0-6])[+].*$|^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|19[.][0-7])[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:b6f3a6e7cab0bb6e2590f6e6612a3edec75e3b28d32a4e55325bdeeb7d836662","url":"https://access.redhat.com/errata/RHSA-2025:13848"}},{"version":"4.19.22","payload":"quay.io/openshift-release-dev/ocp-release@sha256:e4377ba202c97eccba15c3a428cd4e532a02d5420d5b8918cdd3284404abb1ba","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:e4377ba202c97eccba15c3a428cd4e532a02d5420d5b8918cdd3284404abb1ba","url":"https://access.redhat.com/errata/RHBA-2026:0682"}},{"version":"4.18.37","payload":"quay.io/openshift-release-dev/ocp-release@sha256:9b7068aa6f6087c2f0a7cefa241c5dbb0ede0efaad783607dff0da98cac432d2","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-
4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:9b7068aa6f6087c2f0a7cefa241c5dbb0ede0efaad783607dff0da98cac432d2","url":"https://access.redhat.com/errata/RHSA-2026:6554"}},{"version":"4.18.5","payload":"quay.io/openshift-release-dev/ocp-release@sha256:93879f84b3165c5b5bd1fdf4563a11155dc61ea35cd93e67dc61c2b66e11c8bb","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|4[.](17[.](1[01]|0-.*|[0-9])|18.0-(ec[.].*|rc[.][0-3]))|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:93879f84b3165c5b5bd1fdf4563a11155dc61ea35cd93e67dc61c2b66e11c8bb","url":"https://access.redhat.com/errata/RHSA-2025:2705"}},{"version":"4.18.2","payload":"quay.io/openshift-release-dev/ocp-release@sha256:46f9db00dac167897378825ea5f3cce0867743ac90498bbb61b0816daedd0d00","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|4[.](17[.](1[01]|0-.*|[0-9])|18.0-(ec[.].*|rc[.][0-3]))|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:46f9db00dac167897378825ea5f3cce0867743ac90498bbb61b0816daedd0d00","url":"https://access.redhat.com/errata/RHBA-2025:1904"}},{"version":"4.18.1","payload":"quay.io/openshift-release-dev/ocp-release@sha256:d9c985464c0315160971b3e79f5fbec628d403a572f7a6d893c04627c066c0bb","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|4[.](17[.](1[01]|0-.*|[0-9])|18.0-(ec[.].*|rc[.][0-3]))|4
[.]18[.]0-ec[.]4[+].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:d9c985464c0315160971b3e79f5fbec628d403a572f7a6d893c04627c066c0bb","url":"https://access.redhat.com/errata/RHSA-2024:6122"}},{"version":"4.19.24","payload":"quay.io/openshift-release-dev/ocp-release@sha256:3ef832b8bb0d56331035ba54af36c36be46d6c6dc1a41e300055692f02bb001d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:3ef832b8bb0d56331035ba54af36c36be46d6c6dc1a41e300055692f02bb001d","url":"https://access.redhat.com/errata/RHSA-2026:2651"}},{"version":"4.19.10","payload":"quay.io/openshift-release-dev/ocp-release@sha256:2f9145136fb387d43c7fff55b30a036c14eb96b0992c292274b6f543c6c33857","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.].*|19[.][0-6])[+].*$|^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|19[.][0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:2f9145136fb387d43c7fff55b30a036c14eb96b0992c292274b6f543c6c33857","url":"https://access.redhat.com/errata/RHBA-2025:14823"}},{"version":"4.19.13","payload":"quay.io/openshift-release-dev/ocp-release@sha256:b221339d28377e7654ecfa76debf7cd11eccc4e45516cca393df6a5ca4dbc736","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.].*|19[.][0-6])[+].*$|^4[.]
(18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|.*|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|19[.][0-7])[+].*$|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:b221339d28377e7654ecfa76debf7cd11eccc4e45516cca393df6a5ca4dbc736","url":"https://access.redhat.com/errata/RHBA-2025:16148"}},{"version":"4.18.30","payload":"quay.io/openshift-release-dev/ocp-release@sha256:349912ef80ff71bdb591b36d8b3eca9df2446fa2497af08058d1777b8e0cf3ca","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:349912ef80ff71bdb591b36d8b3eca9df2446fa2497af08058d1777b8e0cf3ca","url":"https://access.redhat.com/errata/RHBA-2025:22696"}},{"version":"4.18.20","payload":"quay.io/openshift-release-dev/ocp-release@sha256:5e06105a6ba80d04eb5d8d3f9a672fb743ce4710876d99a375c2d9f7b7eaa783","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:5e06105a6ba80d04eb5d8d3f9a672fb743ce4710876d99a375c2d9f7b7eaa783","url":"https://access.redhat.com/errata/RHSA-2025:10767"}},{"version":"4.18.6","payload":"quay.io/openshift-release-dev/ocp-release@sha256:61fdad894f035a8b192647c224faf565279518255bdbf60a91db4ee0479adaa6","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrade
s.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:61fdad894f035a8b192647c224faf565279518255bdbf60a91db4ee0479adaa6","url":"https://access.redhat.com/errata/RHSA-2025:3066"}},{"version":"4.19.25","payload":"quay.io/openshift-release-dev/ocp-release@sha256:02dc35500ba334f341cccbd671471d0941417d135c958a357fb17de61c3ec743","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.][0-9]*|19[.]([0-9]|1[0-7]))[+].*$|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:02dc35500ba334f341cccbd671471d0941417d135c958a357fb17de61c3ec743","url":"https://access.redhat.com/errata/RHBA-2026:3394"}},{"version":"4.20.18","payload":"quay.io/openshift-release-dev/ocp-release@sha256:2dab927fd20984e247301b2483083b71f942a1f550f5d8a1db42897edc042e39","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:2dab927fd20984e247301b2483083b71f942a1f550f5d8a1db42897edc042e39","url":"https://access.redhat.com/errata/RHSA-2026:6564"}},{"version":"4.18.38","payload":"quay.io/openshift-release-dev/ocp-release@sha256:deacb4132f024a8c364ab8589b7b3f391b887ca4ae92bd4b37df2efa5b1e2145","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:deacb4132f024a8c364ab8589b7b3f391b887ca4ae92bd4b37df2efa5b1e2145","url":"https://access.redhat.com/errata/RHSA-2026:8448"}},{"version":"4
.20.2","payload":"quay.io/openshift-release-dev/ocp-release@sha256:0e232879e27fb821eeb1d0e34f9bd8f85e28533836e59cc7fee96fcc9f3851cd","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:0e232879e27fb821eeb1d0e34f9bd8f85e28533836e59cc7fee96fcc9f3851cd","url":"https://access.redhat.com/errata/RHSA-2025:19296"}},{"version":"4.20.19","payload":"quay.io/openshift-release-dev/ocp-release@sha256:e37bcdba07c7312607363ddf5a8e317e4b6952b1465704b38c9a081d095697be","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:e37bcdba07c7312607363ddf5a8e317e4b6952b1465704b38c9a081d095697be","url":"https://access.redhat.com/errata/RHBA-2026:8430"}},{"version":"4.19.26","payload":"quay.io/openshift-release-dev/ocp-release@sha256:64d312cc715ccb58e44b7ed0a7a1a24ad407b72be2cb865512f9bf8ff7578524","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:64d312cc715ccb58e44b7ed0a7a1a24ad407b72be2cb865512f9bf8ff7578524","url":"https://access.redhat.com/errata/RHSA-2026:4434"}},{"version":"4.20.6","payload":"quay.io/openshift-release-dev/ocp-release@sha256:a29bcbc9f286d68b394ffa0288c5de7e487c90077c06cbaf7a4cadeb0398ce28","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stab
le-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:a29bcbc9f286d68b394ffa0288c5de7e487c90077c06cbaf7a4cadeb0398ce28","url":"https://access.redhat.com/errata/RHSA-2025:22257"}},{"version":"4.19.27","payload":"quay.io/openshift-release-dev/ocp-release@sha256:67559510d25d5024c374d67104b99d88f83746f615fa8232ae778d9e7fec14f8","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]18[.].*|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:67559510d25d5024c374d67104b99d88f83746f615fa8232ae778d9e7fec14f8","url":"https://access.redhat.com/errata/RHSA-2026:5878"}},{"version":"4.19.12","payload":"quay.io/openshift-release-dev/ocp-release@sha256:f0ca7c0e9ede6440119f3fd90abdd87e77cf99b7e68d6c1f95ec1872c62cbb17","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.].*|19[.][0-6])[+].*$|^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|.*|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|19[.][0-7])[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:f0ca7c0e9ede6440119f3fd90abdd87e77cf99b7e68d6c1f95ec1872c62cbb17","url":"https://access.redhat.com/errata/RHBA-2025:15694"}},{"version":"4.18.21","payload":"quay.io/openshift-release-dev/ocp-release@sha256:9d1b107adad76f023493b8c2b74902639f66273cc120e255454ad447a9ef27d9","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:9d1b107adad76f023493
b8c2b74902639f66273cc120e255454ad447a9ef27d9","url":"https://access.redhat.com/errata/RHSA-2025:11677"}},{"version":"4.18.22","payload":"quay.io/openshift-release-dev/ocp-release@sha256:16078b671c7f5490a2136f2cd9a694d48bb38af1280ef9e2ae9ce28af075cca5","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|^4[.](17[.].*|18[.](1?[0-9]|2[0-1]))[+].*$|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|17[.](([0-9]|[1-2][0-9]|3[0-7])))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:16078b671c7f5490a2136f2cd9a694d48bb38af1280ef9e2ae9ce28af075cca5","url":"https://access.redhat.com/errata/RHSA-2025:13325"}},{"version":"4.20.11","payload":"quay.io/openshift-release-dev/ocp-release@sha256:bbe1ffd9b28cbe6814cb6cecef844733f38aba1a3d400b1bc5aff5865cfe665e","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:bbe1ffd9b28cbe6814cb6cecef844733f38aba1a3d400b1bc5aff5865cfe665e","url":"https://access.redhat.com/errata/RHSA-2026:0663"}},{"version":"4.18.10","payload":"quay.io/openshift-release-dev/ocp-release@sha256:be8bcea2ab176321a4e1e54caab4709f9024bc437e52ca5bc088e729367cd0cf","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|.*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:be8bcea2ab176321a4e1e54caab4709f9024bc437e52ca5b
c088e729367cd0cf","url":"https://access.redhat.com/errata/RHSA-2025:4019"}},{"version":"4.19.14","payload":"quay.io/openshift-release-dev/ocp-release@sha256:f8e21e76897b3f9b8a76a07b5a9426ba8def9b2e56b18d8b40ad65931b8bbf78","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|4[.]18[.].*|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:f8e21e76897b3f9b8a76a07b5a9426ba8def9b2e56b18d8b40ad65931b8bbf78","url":"https://access.redhat.com/errata/RHBA-2025:16693"}},{"version":"4.18.7","payload":"quay.io/openshift-release-dev/ocp-release@sha256:91037938dc2ebc2732e7baa6eb4192fa4376abab19f0f545848a87ab7c91931d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:91037938dc2ebc2732e7baa6eb4192fa4376abab19f0f545848a87ab7c91931d","url":"https://access.redhat.com/errata/RHBA-2025:3293"}},{"version":"4.18.23","payload":"quay.io/openshift-release-dev/ocp-release@sha256:0bf2e8c1edf16de717c330b94d85f6d463c7208956b0a545cbb3fcf715e14c38","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|^4[.](17[.].*|18[.](1?[0-9]|2[0-2]))[+].*$|^4[.](17[.].*|18[.](1?[0-9]|2[0-1]))[+].*$|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|17[.](([0-9]|[1-2][0-9]|3[0-7])))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release
.manifestref":"sha256:0bf2e8c1edf16de717c330b94d85f6d463c7208956b0a545cbb3fcf715e14c38","url":"https://access.redhat.com/errata/RHSA-2025:14820"}},{"version":"4.19.15","payload":"quay.io/openshift-release-dev/ocp-release@sha256:d96bf58288bfe00d347707ba0b9fa5455ee0d506ae4dfe417518473197ee16ab","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|4[.]18[.].*|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:d96bf58288bfe00d347707ba0b9fa5455ee0d506ae4dfe417518473197ee16ab","url":"https://access.redhat.com/errata/RHBA-2025:17237"}},{"version":"4.18.11","payload":"quay.io/openshift-release-dev/ocp-release@sha256:b3c76706606940d84964095aaab1a8ed4eca0d1bd6833b4eb718115842ef6850","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|.*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:b3c76706606940d84964095aaab1a8ed4eca0d1bd6833b4eb718115842ef6850","url":"https://access.redhat.com/errata/RHSA-2025:4211"}},{"version":"4.20.14","payload":"quay.io/openshift-release-dev/ocp-release@sha256:682e85bfe8034924e596b281ed8fefe4451e6f6c5bac07b5ec300443eeb23566","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:682e85bfe8034924e596b281ed8fefe4451e6f6c5bac07b5ec300443eeb23566","url":"http
s://access.redhat.com/errata/RHSA-2026:2119"}},{"version":"4.19.29","payload":"quay.io/openshift-release-dev/ocp-release@sha256:4e97f1a9f5f5e751c7795ab6638723b064447106845f3d75d35b03a1a6c9488c","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:4e97f1a9f5f5e751c7795ab6638723b064447106845f3d75d35b03a1a6c9488c","url":"https://access.redhat.com/errata/RHSA-2026:10093"}},{"version":"4.19.11","payload":"quay.io/openshift-release-dev/ocp-release@sha256:d28dff1fd2bbbf7e923d24da21c921c53b61089690fbbe9d4b03c847487c2b5f","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.].*|19[.][0-6])[+].*$|^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|^4[.](18[.](1?[0-9]|2[0-1])|19[.][0-3])[+].*$|4[.]18[.].*|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|19[.][0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:d28dff1fd2bbbf7e923d24da21c921c53b61089690fbbe9d4b03c847487c2b5f","url":"https://access.redhat.com/errata/RHBA-2025:15293"}},{"version":"4.19.28","payload":"quay.io/openshift-release-dev/ocp-release@sha256:b274766f7194a7dc825e335a54078790519c0dbe3431c029fd08dbba1c431855","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]18[.].*|.*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:b274766f7194a7dc825e335a54078790519c0dbe3431c029fd08dbba1c431855","url":"https://access.redhat.com/errata/RHSA-2026:7249"}},{"version":"4.19.0","payload":"quay.io/openshift-release-dev/ocp-release@sha256:3482dbdce3a6fb2239684d217bba6fc87453eff3bdb72f5237be4beb22a2160b","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.
]18[.]1[6-8][+].*$|^4[.]18[.](1[01]|[0-9])[+].*$|4.18[.].*|4.18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:3482dbdce3a6fb2239684d217bba6fc87453eff3bdb72f5237be4beb22a2160b","url":"https://access.redhat.com/errata/RHSA-2024:11038"}},{"version":"4.18.12","payload":"quay.io/openshift-release-dev/ocp-release@sha256:31e8978d1f7a24c3e70dcc12c93dd5e73311b78e528f73beb020ddbe3270e07d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|4[.]17[.].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:31e8978d1f7a24c3e70dcc12c93dd5e73311b78e528f73beb020ddbe3270e07d","url":"https://access.redhat.com/errata/RHSA-2025:4427"}},{"version":"4.18.31","payload":"quay.io/openshift-release-dev/ocp-release@sha256:64e8c4cbd3199beb1ebb8d6a208cb81fdfe62f4e52d7586033f534d35f981244","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:64e8c4cbd3199beb1ebb8d6a208cb81fdfe62f4e52d7586033f534d35f981244","url":"https://access.redhat.com/errata/RHSA-2026:338"}},{"version":"4.19.30","payload":"quay.io/openshift-release-dev/ocp-release@sha256:7a772b30ed4234520e722cbdf2078d05254a7bc38735abc11f7019fb507deeda","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20",
"io.openshift.upgrades.graph.release.manifestref":"sha256:7a772b30ed4234520e722cbdf2078d05254a7bc38735abc11f7019fb507deeda","url":"https://access.redhat.com/errata/RHBA-2026:13720"}},{"version":"4.18.32","payload":"quay.io/openshift-release-dev/ocp-release@sha256:6177c447b98c36a42fd45fa2ba413da73d14d0a7ad3aecfa977554f5ae9583cc","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:6177c447b98c36a42fd45fa2ba413da73d14d0a7ad3aecfa977554f5ae9583cc","url":"https://access.redhat.com/errata/RHSA-2026:1062"}},{"version":"4.18.3","payload":"quay.io/openshift-release-dev/ocp-release@sha256:fdcb3da3a1086d664df31a1fa2a629c77780f844d458af956928cca297da343c","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":".*|4[.]17[.].*|4[.](17[.](1[01]|0-.*|[0-9])|18.0-(ec[.].*|rc[.][0-3]))|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:fdcb3da3a1086d664df31a1fa2a629c77780f844d458af956928cca297da343c","url":"https://access.redhat.com/errata/RHBA-2025:2229"}},{"version":"4.20.0","payload":"quay.io/openshift-release-dev/ocp-release@sha256:d1dc76522d1e235b97675b28e977cb8c452f47d39c0eb519cde02114925f91d2","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$|4[.]19[.]1[67][+].*","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:d1dc76522d1e235b97675b28e977cb8c452f47d39c0eb519cde02114925f91d2","url":"https://access.redhat.com/errata/
RHSA-2025:9562"}},{"version":"4.19.2","payload":"quay.io/openshift-release-dev/ocp-release@sha256:1293f5ccad2a2776241344faecaf7320f60ee91882df4e24b309f3a7cefc04be","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.]18[.]1[6-8][+].*$|4.18[.].*|4.18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:1293f5ccad2a2776241344faecaf7320f60ee91882df4e24b309f3a7cefc04be","url":"https://access.redhat.com/errata/RHSA-2025:9750"}},{"version":"4.20.20","payload":"quay.io/openshift-release-dev/ocp-release@sha256:f3d952e9a20de0c5db249c6c7771d6b71d4eaa9d269d5da8868e6652f6dbec0d","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:f3d952e9a20de0c5db249c6c7771d6b71d4eaa9d269d5da8868e6652f6dbec0d","url":"https://access.redhat.com/errata/RHBA-2026:12066"}},{"version":"4.18.13","payload":"quay.io/openshift-release-dev/ocp-release@sha256:a93c65b0f9de1d2e29641fbeebc07178733db1cacc7bde178033d7b9183540bc","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:a93c65b0f9de1d2e29641fbeebc07178733db1cacc7bde178033d7b9183540bc","url":"https://access.redhat.com/errata/RHSA-2025:4712"}},{"version":"4.18.25","payload":"quay.io/openshift-release-dev/ocp-release@sha256:ba6f0f2eca65cd386a5109ddbbdb3bab9bb9801e32
de56ef34f80e634a7787be","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.]([1-3]?[0-9])|18[.](1?[0-9]|2[0-3]))[+].*$|^4[.](17[.].*|18[.](1?[0-9]|2[0-2]))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:ba6f0f2eca65cd386a5109ddbbdb3bab9bb9801e32de56ef34f80e634a7787be","url":"https://access.redhat.com/errata/RHBA-2025:16732"}},{"version":"4.18.8","payload":"quay.io/openshift-release-dev/ocp-release@sha256:509888097ba7d3b4eeb5aac0586acff2ec13fff07004ac692e0dcf5cf4fe2690","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]17[.].*|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:509888097ba7d3b4eeb5aac0586acff2ec13fff07004ac692e0dcf5cf4fe2690","url":"https://access.redhat.com/errata/RHSA-2025:3577"}},{"version":"4.18.24","payload":"quay.io/openshift-release-dev/ocp-release@sha256:2db093f063ad5310fa4e5ed2d2eda4bad5215c47092b72d1cfafbcfdbf1f4dd2","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.]([1-3]?[0-9])|18[.](1?[0-9]|2[0-3]))[+].*$|^4[.](17[.].*|18[.](1?[0-9]|2[0-2]))[+].*$|^4[.](17[.].*|18[.](1?[0-9]|2[0-1]))[+].*$|^4[.](18[.]([0-9]|1[0-9]|2[0-1])|17[.](([0-9]|[1-2][0-9]|3[0-7])))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:2db093f063ad5310fa4e5ed2d2eda4bad5215c47092b72d1cfafbcfdbf1f4dd2","url":"https://access.r
edhat.com/errata/RHBA-2025:15714"}},{"version":"4.18.14","payload":"quay.io/openshift-release-dev/ocp-release@sha256:78c0475ba249e03b0ed5b3d3cca619020a2996fb75efb9e7b5a2d5972fbdac7c","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:78c0475ba249e03b0ed5b3d3cca619020a2996fb75efb9e7b5a2d5972fbdac7c","url":"https://access.redhat.com/errata/RHSA-2025:7863"}},{"version":"4.20.13","payload":"quay.io/openshift-release-dev/ocp-release@sha256:280ffe256696c1b3699a5d0bc9c520b785a0c6acff0ed58073adbd206185c4b9","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:280ffe256696c1b3699a5d0bc9c520b785a0c6acff0ed58073adbd206185c4b9","url":"https://access.redhat.com/errata/RHSA-2026:1555"}},{"version":"4.18.34","payload":"quay.io/openshift-release-dev/ocp-release@sha256:14bd3c04daa885009785d48f4973e2890751a7ec116cc14d17627245cda54d7b","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:14bd3c04daa885009785d48f4973e2890751a7ec116cc14d17627245cda54d7b","url":"https://access.redhat.com/errata/RHSA-2026:2977"}},{"version":"4.18.33","payload":"quay.io/openshift-release-dev/ocp-release@sha256:40bb7cf7c637bf9efd8fb0157839d325a019d67cc7d72
79665fcf90dbb7f3f33","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:40bb7cf7c637bf9efd8fb0157839d325a019d67cc7d7279665fcf90dbb7f3f33","url":"https://access.redhat.com/errata/RHSA-2026:2078"}},{"version":"4.19.3","payload":"quay.io/openshift-release-dev/ocp-release@sha256:0b44c4b526b4743e744cb989c6fc768fdfd9ac9abffc8f43a014bb90b7bf522d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4.18.*|^4[.]18[.]1[6-8][+].*$|4.18[.].*|4.18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*|4[.]18[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:0b44c4b526b4743e744cb989c6fc768fdfd9ac9abffc8f43a014bb90b7bf522d","url":"https://access.redhat.com/errata/RHBA-2025:10290"}},{"version":"4.18.15","payload":"quay.io/openshift-release-dev/ocp-release@sha256:0ebcecebc52a63285669ed74f0e591865b702de34c0a488cbba02dfb53d71cbe","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](17[.](2[0-8]|[1]?[0-9])|18[.](1[01]|[0-9]))[+].*$|^4[.](17[.]([0-9]|1[0-6]))[+].*$|4[.]17[.].*|^4[.](17[.]([1]?[0-9]|2[0-1])|18[.][0-5])[+].*$|4[.]17[.].*","io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:0ebcecebc52a63285669ed74f0e591865b702de34c0a488cbba02dfb53d71cbe","url":"https://access.redhat.com/errata/RHBA-2025:8104"}},{"version":"4.20.10","payload":"quay.io/openshift-release-dev/ocp-release@sha256:2d228e6d0b5a5ef2d7eb40bc171ad44f06b990d7adb678914e5d9d047e72568d","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"4[.]19[.].*|^4[.]19[.]([0-9]|1[0-7])
[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:2d228e6d0b5a5ef2d7eb40bc171ad44f06b990d7adb678914e5d9d047e72568d","url":"https://access.redhat.com/errata/RHBA-2026:370"}},{"version":"4.20.15","payload":"quay.io/openshift-release-dev/ocp-release@sha256:a60fbe523d8ad802ab9bcbb4c505f5fe4467283fc748e4978fe9a3b280145d75","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.]19[.]([0-9]|1[0-7])[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.20,eus-4.20,fast-4.20,stable-4.20,candidate-4.21,fast-4.21,stable-4.21,candidate-4.22","io.openshift.upgrades.graph.release.manifestref":"sha256:a60fbe523d8ad802ab9bcbb4c505f5fe4467283fc748e4978fe9a3b280145d75","url":"https://access.redhat.com/errata/RHBA-2026:2987"}},{"version":"4.19.16","payload":"quay.io/openshift-release-dev/ocp-release@sha256:8f57c0a381695f49c15e4b337f0259a02de2cfa10be3882a6fb795c34217d212","metadata":{"io.openshift.upgrades.graph.previous.remove_regex":"^4[.](18[.](1?[0-9]|2[0-2])|19[.][0-8])[+].*$|4[.]18[.].*|^4[.](18[.][0-9]*|19[.]([0-9]|1[0-2]))[+].*$","io.openshift.upgrades.graph.release.channels":"candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:8f57c0a381695f49c15e4b337f0259a02de2cfa10be3882a6fb795c34217d212","url":"https://access.redhat.com/errata/RHBA-2025:17662"}},{"version":"4.18.35","payload":"quay.io/openshift-release-dev/ocp-release@sha256:59727c4b3fef19e5149675cf3350735bbfe2c6588a57654b2e4552dd719f58b1","metadata":{"io.openshift.upgrades.graph.release.channels":"candidate-4.18,eus-4.18,fast-4.18,stable-4.18,candidate-4.19,fast-4.19,stable-4.19,candidate-4.20,eus-4.20,fast-4.20,stable-4.20","io.openshift.upgrades.graph.release.manifestref":"sha256:59727c4b3fef19e5149675cf3350735bbfe2c6588a57654b2e4552dd719f58b1"
,"url":"https://access.redhat.com/errata/RHSA-2026:3905"}}],"edges":[[46,61],[42,69],[0,81],[13,54],[36,41],[22,85],[32,73],[41,15],[21,48],[22,37],[32,61],[43,29],[50,5],[1,14],[26,46],[18,62],[42,15],[69,33],[86,62],[13,73],[76,33],[74,7],[80,27],[34,45],[4,11],[49,54],[22,68],[48,5],[41,81],[59,62],[58,81],[46,25],[11,80],[51,62],[79,29],[39,2],[74,15],[16,22],[35,28],[76,20],[20,80],[67,27],[13,25],[70,20],[56,86],[7,45],[82,48],[66,7],[20,7],[80,62],[84,61],[15,27],[6,85],[17,5],[65,68],[75,62],[57,67],[84,29],[53,7],[63,48],[6,61],[36,40],[21,85],[6,68],[36,81],[3,81],[36,45],[66,69],[9,73],[58,68],[2,62],[30,62],[21,68],[34,28],[46,85],[49,79],[52,69],[58,67],[7,40],[13,79],[13,29],[55,45],[28,45],[4,7],[74,28],[41,52],[75,45],[2,73],[19,27],[52,80],[16,62],[78,33],[55,33],[0,27],[10,23],[52,67],[28,41],[74,83],[76,15],[28,68],[83,40],[60,81],[83,69],[53,81],[3,41],[83,52],[33,62],[76,67],[57,45],[75,69],[13,61],[42,33],[11,33],[83,11],[66,4],[67,80],[34,67],[16,25],[20,33],[30,37],[14,48],[56,2],[21,32],[46,10],[6,32],[52,7],[65,12],[9,84],[46,23],[69,62],[34,20],[28,67],[49,84],[50,62],[19,40],[57,28],[77,81],[55,81],[84,23],[21,22],[8,14],[78,40],[10,25],[23,85],[17,79],[64,73],[32,30],[37,85],[42,45],[21,30],[52,15],[1,68],[60,20],[66,11],[42,27],[48,29],[30,43],[59,5],[55,7],[53,45],[43,48],[49,85],[76,87],[57,87],[3,87],[28,87],[32,37],[22,25],[54,29],[42,87],[16,68],[24,67],[46,79],[34,87],[21,73],[19,87],[67,87],[80,87],[81,87],[69,87],[40,87],[46,54],[61,5],[34,52],[24,87],[70,87],[15,87],[11,20],[3,80],[52,68],[49,9],[66,28],[7,87],[46,5],[48,62],[60,80],[35,20],[27,68],[39,68],[11,69],[40,67],[21,29],[40,62],[36,87],[71,26],[49,61],[28,27],[58,27],[24,81],[72,62],[83,33],[26,9],[60,41],[80,45],[35,33],[65,62],[41,27],[72,48],[54,85],[40,45],[3,33],[4,27],[75,87],[42,67],[43,68],[28,7],[76,27],[29,73],[0,87],[11,27],[59,29],[61,85],[78,28],[38,62],[42,40],[74,80],[83,87],[49,29],[54,5],[20,52],[21,25],[70,28],[4,81],[26,84],[12,72],[64,62],[19,67],[
26,61],[58,62],[74,87],[4,80],[77,87],[20,15],[55,41],[78,87],[24,80],[53,87],[52,87],[58,87],[11,81],[66,80],[19,15],[17,84],[41,87],[16,43],[25,73],[23,29],[0,33],[19,45],[66,52],[60,45],[14,62],[0,80],[61,73],[69,68],[42,3],[43,5],[46,84],[3,45],[7,80],[23,25],[70,40],[74,20],[42,80],[43,62],[75,40],[62,68],[66,87],[38,48],[48,68],[36,69],[40,80],[49,23],[8,62],[0,45],[45,62],[70,27],[50,73],[25,5],[69,81],[34,15],[19,28],[71,9],[6,48],[83,4],[53,15],[28,15],[13,23],[15,68],[83,80],[67,68],[86,5],[82,68],[26,25],[9,25],[12,62],[37,73],[42,57],[59,68],[66,41],[31,68],[0,67],[21,62],[10,29],[52,40],[62,5],[23,5],[7,69],[53,68],[79,85],[9,85],[15,40],[55,52],[16,30],[31,48],[79,25],[26,73],[55,69],[19,69],[42,41],[17,23],[7,67],[81,45],[22,32],[6,37],[3,52],[65,72],[76,45],[75,0],[11,67],[63,62],[87,45],[81,68],[76,81],[73,5],[58,15],[60,67],[78,67],[24,69],[84,85],[77,62],[67,45],[35,81],[77,40],[46,9],[58,33],[77,27],[80,68],[17,29],[66,20],[0,40],[66,78],[59,86],[83,7],[78,15],[3,15],[78,52],[4,87],[3,67],[83,27],[61,29],[10,85],[40,68],[47,5],[4,33],[69,27],[36,20],[6,62],[2,48],[75,27],[3,27],[34,27],[36,52],[4,45],[40,27],[67,81],[81,80],[52,33],[31,38],[36,33],[64,68],[87,68],[41,40],[52,81],[25,29],[30,61],[87,27],[84,73],[67,62],[71,46],[85,25],[72,68],[70,15],[35,69],[74,27],[77,0],[4,67],[7,62],[59,25],[42,52],[42,7],[45,68],[41,33],[11,15],[37,62],[37,5],[7,15],[71,73],[40,69],[24,68],[35,41],[74,78],[40,81],[66,15],[58,69],[71,79],[32,68],[3,69],[60,69],[24,62],[4,69],[53,33],[15,45],[74,11],[57,15],[77,45],[6,5],[57,69],[0,15],[22,29],[78,45],[26,23],[7,81],[64,5],[57,20],[3,7],[66,27],[53,80],[27,33],[77,33],[43,73],[19,7],[28,62],[75,15],[18,68],[48,73],[60,15],[17,73],[0,68],[12,68],[6,73],[85,73],[70,7],[33,68],[86,73],[28,52],[11,41],[57,27],[71,5],[60,52],[16,5],[86,48],[19,80],[53,40],[55,28],[28,80],[9,54],[60,87],[18,48],[15,62],[22,73],[70,45],[70,81],[77,80],[57,76],[32,43],[40,33],[36,27],[31,63],[57,3],[36,67],[54,25],[34,80],[35,52],[17,6
1],[76,3],[20,41],[80,33],[54,79],[75,33],[35,87],[35,40],[49,5],[30,5],[82,62],[42,76],[59,2],[6,22],[7,33],[23,61],[10,79],[83,81],[57,81],[16,85],[10,9],[22,30],[37,43],[74,40],[32,48],[87,62],[32,25],[63,68],[62,73],[10,54],[41,67],[11,87],[42,28],[78,4],[28,40],[57,7],[78,11],[15,80],[50,68],[58,80],[58,40],[74,45],[36,15],[39,48],[11,28],[19,41],[0,7],[34,7],[17,25],[13,9],[26,79],[2,5],[0,69],[57,33],[70,33],[21,5],[30,48],[86,25],[12,82],[10,73],[35,45],[26,5],[13,5],[52,62],[85,29],[75,7],[17,54],[3,40],[57,40],[59,73],[74,52],[69,45],[55,27],[83,28],[51,68],[8,68],[71,61],[56,48],[30,29],[24,45],[81,62],[55,40],[4,15],[81,27],[76,40],[71,84],[41,45],[24,40],[20,27],[53,69],[39,62],[19,20],[15,33],[78,83],[30,85],[66,45],[23,79],[14,68],[10,84],[81,33],[55,67],[83,67],[41,7],[84,25],[70,41],[53,67],[3,28],[84,79],[26,54],[20,28],[78,27],[16,48],[3,20],[76,41],[31,62],[77,68],[37,68],[77,7],[77,69],[9,5],[19,52],[83,15],[35,80],[1,48],[34,81],[66,83],[20,40],[52,45],[30,73],[74,67],[77,15],[76,80],[16,32],[66,33],[15,81],[22,48],[6,25],[78,80],[68,5],[46,29],[21,43],[35,27],[69,80],[49,73],[20,67],[42,20],[71,23],[38,63],[26,85],[85,5],[70,80],[71,54],[1,8],[41,69],[66,74],[32,85],[77,75],[41,68],[6,30],[15,67],[87,33],[78,81],[44,73],[61,25],[70,69],[57,52],[11,40],[54,61],[75,68],[51,48],[27,45],[28,33],[0,62],[34,41],[16,29],[20,81],[75,80],[53,62],[7,27],[20,45],[52,27],[1,62],[34,69],[83,41],[78,41],[35,67],[74,33],[37,29],[36,28],[36,80],[28,69],[75,81],[22,43],[19,33],[22,5],[41,62],[44,5],[36,7],[78,69],[60,28],[9,61],[57,80],[58,45],[42,81],[71,10],[84,54],[7,68],[20,87],[55,87],[37,25],[17,9],[70,67],[76,52],[49,25],[4,41],[15,69],[83,20],[41,80],[66,40],[83,45],[65,48],[33,45],[75,67],[72,82],[37,48],[86,68],[29,5],[47,73],[71,85],[78,20],[78,7],[59,48],[84,5],[22,61],[56,62],[54,23],[60,40],[19,81],[22,62],[79,61],[70,52],[58,7],[6,43],[76,69],[26,29],[16,61],[77,67],[66,81],[24,33],[4,20],[34,33],[74,4],[6,29],[56,68],[32,5],[27,62],[55,20],[2,6
8],[23,73],[9,29],[60,33],[38,68],[2,25],[21,37],[67,33],[11,52],[16,37],[28,81],[9,79],[39,86],[4,52],[57,41],[79,73],[24,27],[34,40],[21,61],[2,29],[16,73],[55,80],[66,67],[26,10],[54,73],[67,69],[46,73],[65,82],[74,81],[13,85],[76,7],[13,84],[11,7],[8,48],[30,68],[20,69],[11,45],[53,27],[86,2],[30,25],[9,23],[76,28],[32,29],[10,61],[74,41],[71,25],[35,7],[79,5],[10,5],[60,27],[86,29],[71,29],[35,15],[4,40],[17,85],[60,7],[4,28],[55,15],[74,69],[32,62],[12,48]],"conditionalEdges":[{"edges":[{"from":"4.18.19","to":"4.19.4"},{"from":"4.18.20","to":"4.19.4"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1110","name":"HCPServiceHealthCheckDisruption","message":"When Hosted Control Plane (HCP/HyperShift) clusters running on AWS update a node pool, the Services of type\nLoadBalancer may experience temporary availability disruption because health checks are not set up properly to monitor\nNode readiness state.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\n  or\n  0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\n* on (_id) group_left (type) (\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress 
canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degrade machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, 
namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3357","name":"OldBootImagesComposeFSvsGrubProbe","message":"Upgrade to 4.19 will fail due to a boot image incompatibility issue if a cluster was born in 4.2 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  label_replace(group by (version) (cluster_version{_id=\"\",type=\"initial\",version=~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"yes, so possibly actually born in 4.2 or earlier\", \"\", \"\")\n  or\n  label_replace(0 * group by (version) (cluster_version{_id=\"\",type=\"initial\",version!~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"no, born in 4.10 or later\", \"\", 
\"\")\n)\n"}}]}]},{"edges":[{"from":"4.19.13","to":"4.19.27"},{"from":"4.19.9","to":"4.19.28"},{"from":"4.19.27","to":"4.20.19"},{"from":"4.19.20","to":"4.19.28"},{"from":"4.20.2","to":"4.20.19"},{"from":"4.20.8","to":"4.20.18"},{"from":"4.19.18","to":"4.20.18"},{"from":"4.19.23","to":"4.20.19"},{"from":"4.19.3","to":"4.19.28"},{"from":"4.19.25","to":"4.20.19"},{"from":"4.19.7","to":"4.19.27"},{"from":"4.19.12","to":"4.19.28"},{"from":"4.20.10","to":"4.20.18"},{"from":"4.19.9","to":"4.19.27"},{"from":"4.19.6","to":"4.19.28"},{"from":"4.20.18","to":"4.20.19"},{"from":"4.19.21","to":"4.19.28"},{"from":"4.19.10","to":"4.19.27"},{"from":"4.19.14","to":"4.19.28"},{"from":"4.20.6","to":"4.20.18"},{"from":"4.19.11","to":"4.19.27"},{"from":"4.20.5","to":"4.20.18"},{"from":"4.19.19","to":"4.20.18"},{"from":"4.20.12","to":"4.20.18"},{"from":"4.19.15","to":"4.19.28"},{"from":"4.20.13","to":"4.20.19"},{"from":"4.19.17","to":"4.20.18"},{"from":"4.19.11","to":"4.19.28"},{"from":"4.19.20","to":"4.20.18"},{"from":"4.19.27","to":"4.20.18"},{"from":"4.19.27","to":"4.19.28"},{"from":"4.19.22","to":"4.19.27"},{"from":"4.19.24","to":"4.20.18"},{"from":"4.19.16","to":"4.19.28"},{"from":"4.20.1","to":"4.20.19"},{"from":"4.20.2","to":"4.20.18"},{"from":"4.20.15","to":"4.20.18"},{"from":"4.19.24","to":"4.19.28"},{"from":"4.19.2","to":"4.19.27"},{"from":"4.20.5","to":"4.20.19"},{"from":"4.19.21","to":"4.20.19"},{"from":"4.20.4","to":"4.20.18"},{"from":"4.20.11","to":"4.20.18"},{"from":"4.20.16","to":"4.20.18"},{"from":"4.20.14","to":"4.20.18"},{"from":"4.19.19","to":"4.20.19"},{"from":"4.20.14","to":"4.20.19"},{"from":"4.19.0","to":"4.19.27"},{"from":"4.20.10","to":"4.20.19"},{"from":"4.19.18","to":"4.19.28"},{"from":"4.19.28","to":"4.20.19"},{"from":"4.19.17","to":"4.19.28"},{"from":"4.19.10","to":"4.19.28"},{"from":"4.19.7","to":"4.19.28"},{"from":"4.19.18","to":"4.20.19"},{"from":"4.20.3","to":"4.20.18"},{"from":"4.19.21","to":"4.19.27"},{"from":"4.20.6","to":"4.20.19"},{"from":"4.19.20",
"to":"4.20.19"},{"from":"4.20.17","to":"4.20.19"},{"from":"4.19.23","to":"4.19.28"},{"from":"4.20.17","to":"4.20.18"},{"from":"4.19.16","to":"4.19.27"},{"from":"4.20.0","to":"4.20.19"},{"from":"4.19.25","to":"4.20.18"},{"from":"4.19.5","to":"4.19.27"},{"from":"4.19.4","to":"4.19.28"},{"from":"4.20.0","to":"4.20.18"},{"from":"4.19.5","to":"4.19.28"},{"from":"4.19.22","to":"4.19.28"},{"from":"4.19.25","to":"4.19.27"},{"from":"4.20.13","to":"4.20.18"},{"from":"4.19.0","to":"4.19.28"},{"from":"4.19.19","to":"4.19.27"},{"from":"4.19.14","to":"4.19.27"},{"from":"4.19.16","to":"4.20.19"},{"from":"4.19.6","to":"4.19.27"},{"from":"4.19.26","to":"4.20.18"},{"from":"4.20.16","to":"4.20.19"},{"from":"4.19.1","to":"4.19.27"},{"from":"4.19.26","to":"4.20.19"},{"from":"4.19.4","to":"4.19.27"},{"from":"4.20.3","to":"4.20.19"},{"from":"4.19.2","to":"4.19.28"},{"from":"4.19.19","to":"4.19.28"},{"from":"4.19.15","to":"4.20.18"},{"from":"4.19.22","to":"4.20.19"},{"from":"4.20.1","to":"4.20.18"},{"from":"4.20.4","to":"4.20.19"},{"from":"4.20.8","to":"4.20.19"},{"from":"4.19.24","to":"4.20.19"},{"from":"4.19.17","to":"4.19.27"},{"from":"4.19.23","to":"4.20.18"},{"from":"4.19.13","to":"4.19.28"},{"from":"4.19.25","to":"4.19.28"},{"from":"4.19.1","to":"4.19.28"},{"from":"4.19.15","to":"4.20.19"},{"from":"4.19.18","to":"4.19.27"},{"from":"4.19.22","to":"4.20.18"},{"from":"4.19.15","to":"4.19.27"},{"from":"4.19.12","to":"4.19.27"},{"from":"4.19.17","to":"4.20.19"},{"from":"4.20.11","to":"4.20.19"},{"from":"4.19.26","to":"4.19.28"},{"from":"4.19.23","to":"4.19.27"},{"from":"4.20.12","to":"4.20.19"},{"from":"4.19.16","to":"4.20.18"},{"from":"4.19.24","to":"4.19.27"},{"from":"4.19.26","to":"4.19.27"},{"from":"4.20.15","to":"4.20.19"},{"from":"4.19.3","to":"4.19.27"},{"from":"4.19.21","to":"4.20.18"},{"from":"4.19.20","to":"4.19.27"}],"risks":[{"url":"https://redhat.atlassian.net/browse/CORENET-6950","name":"PrecisionTimeProtocolDPLLPins","message":"Clusters using older PTP operators may 
struggle to synchronize system clocks and might not provide time to downstream clients.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, name) (csv_succeeded{_id=\"\", name=~\"ptp-operator[.]v4[.][0-9]*[.]0-(202[3-5]|20260[1-9])[0-9]*\"})\nor on (_id)\n0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"ptp-operator.v4.y.0-20260331... or older not installed\", \"\", \"\")\n"}}]}]},{"edges":[{"from":"4.18.21","to":"4.19.15"},{"from":"4.18.19","to":"4.19.14"},{"from":"4.18.19","to":"4.19.15"},{"from":"4.18.22","to":"4.19.15"},{"from":"4.18.20","to":"4.19.14"},{"from":"4.18.20","to":"4.19.15"},{"from":"4.18.21","to":"4.19.14"},{"from":"4.18.22","to":"4.19.14"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but 
the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.7","to":"4.19.9"},{"from":"4.19.7","to":"4.19.10"},{"from":"4.19.7","to":"4.19.11"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.7","to":"4.18.22"},{"from":"4.18.19","to":"4.18.22"},{"from":"4.18.4","to":"4.18.22"},{"from":"4.18.21","to":"4.18.22"},{"from":"4.18.18","to":"4.18.22"},{"from":"4.18.3","to":"4.18.22"},{"from":"4.18.15","to":"4.18.22"},{"from":"4.18.14","to":"4.18.22"},{"from":"4.18.12","to":"4.18.22"},{"from":"4.18.9","to":"4.18.22"},{"from":"4.18.5","to":"4.18.22"},{"from":"4.18.16","to":"4.18.22"},{"from":"4.18.8","to":"4.18.22"},{"from":"4.18.20","to":"4.18.22"},{"from":"4.18.6","to":"4.18.22"},{"from":"4.18.2","to":"4.18.22"},{"from":"4.18.13","to":"4.18.22"},{"from":"4.18.10","to":"4.18.22"},{"from":"4.18.11","to":"4.18.22"},{"from":"4.18.17","to":"4.18.22"},{"from":"4.18.1","to":"4.18.22"}],"risks":[{"url":"https://issues.redhat.com/browse/RUN-3446","name":"CrunConflictsWithNVIDIA","message":"Some crun 1.23 releases conflict with the NVIDIA GPU Operator over eBPF, causing issues with GPU workloads.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (name) (csv_succeeded{_id=\"\", name=~\"gpu-operator-certified[.].*\"})\nor on (_id)\n0 * group(csv_count{_id=\"\"})"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.19.10","to":"4.19.14"},{"from":"4.19.12","to":"4.19.15"},{"from":"4.19.11","to":"4.19.15"},{"from":"4.19.11","to":"4.19.14"},{"from":"4.19.10","to":"4.19.15"},{"from":"4.19.12","to":"4.19.14"},{"from":"4.19.9","to":"4.19.15"},{"from":"4.19.9","to":"4.19.14"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may 
struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.18","to":"4.19.2"},{"from":"4.18.17","to":"4.19.1"},{"from":"4.18.16","to":"4.19.0"},{"from":"4.18.16","to":"4.19.1"},{"from":"4.18.16","to":"4.19.2"},{"from":"4.18.17","to":"4.19.2"},{"from":"4.18.18","to":"4.19.1"},{"from":"4.18.17","to":"4.19.0"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MON-4270","name":"AlertmanagerV1NotSupported","message":"Upgrade to OpenShift 4.19 will fail on cluster monitoring operator in case API version v1 of Alertmanager\nis still specified in the ConfigMaps \"cluster-monitoring-config\" or \"user-workload-monitoring-config\".","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1109","name":"HCPMetallbCNOCannotDeployFRRK8S","message":"On Hosted Control Plane (HCP/HyperShift) clusters with installed MetalLB operator, Cluster Network Operator fails to\ndeploy a critical component FRR-k8s when updated. 
MetalLB will stop working properly and stop advertising services,\nmaking them potentially unreachable from outside the cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\ngroup by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\nand on (_id) (\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n)\nor on (_id) (\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", \".*\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1110","name":"HCPServiceHealthCheckDisruption","message":"When Hosted Control Plane (HCP/HyperShift) clusters running on AWS update a node pool, the Services of type\nLoadBalancer may experience temporary availability disruption because health checks are not set up properly to monitor\nNode readiness state.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\n  or\n  0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\n* on (_id) group_left (type) (\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degrade machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3357","name":"OldBootImagesComposeFSvsGrubProbe","message":"Upgrade to 4.19 will fail due to a boot image incompatibility issue if a cluster was born in 4.2 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  label_replace(group by (version) (cluster_version{_id=\"\",type=\"initial\",version=~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"yes, so possibly actually born in 4.2 or earlier\", \"\", \"\")\n  or\n  label_replace(0 * group by (version) (cluster_version{_id=\"\",type=\"initial\",version!~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"no, born in 4.10 or later\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/STOR-2486","name":"VSphereStorageMountIssues","message":"vSphere customers using vSAN file volumes can't mount vSphere shared volumes and NFS volumes which server do not set NFS4ERR_ATTRNOTSUPP","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_infrastructure_provider{type=~\"VSphere|None\"})\nor\n0 * group(cluster_infrastructure_provider)\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.18.23"}],"risks":[{"url":"https://issues.redhat.com/browse/RUN-3446","name":"CrunConflictsWithNVIDIA","message":"Some crun 1.23 releases conflict with the NVIDIA GPU Operator over eBPF, causing issues with GPU workloads.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (name) (csv_succeeded{_id=\"\", name=~\"gpu-operator-certified[.].*\"})\nor on (_id)\n0 * group(csv_count{_id=\"\"})"}}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.5","to":"4.19.9"},{"from":"4.19.4","to":"4.19.10"},{"from":"4.19.5","to":"4.19.10"},{"from":"4.19.4","to":"4.19.11"},{"from":"4.19.6","to":"4.19.10"},{"from":"4.19.4","to":"4.19.9"},{"from":"4.19.6","to":"4.19.11"},{"from":"4.19.6","to":"4.19.9"},{"from":"4.19.5","to":"4.19.11"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.28","to":"4.19.19"},{"from":"4.18.22","to":"4.19.20"},{"from":"4.18.27","to":"4.19.20"},{"from":"4.18.24","to":"4.19.20"},{"from":"4.18.23","to":"4.19.19"},{"from":"4.18.26","to":"4.19.19"},{"from":"4.18.28","to":"4.19.20"},{"from":"4.18.20","to":"4.19.20"},{"from":"4.18.26","to":"4.19.20"},{"from":"4.18.22","to":"4.19.19"},{"from":"4.18.25","to":"4.19.20"},{"from":"4.18.19","to":"4.19.19"},{"from":"4.18.24","to":"4.19.19"},{"from":"4.18.23","to":"4.19.20"},{"from":"4.18.20","to":"4.19.19"},{"from":"4.18.25","to":"4.19.19"},{"from":"4.18.29","to":"4.19.20"},{"from":"4.18.21","to":"4.19.20"},{"from":"4.18.27","to":"4.19.19"},{"from":"4.18.21","to":"4.19.19"},{"from":"4.18.19","to":"4.19.20"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/RUN-3748","name":"RuncShareProcessNamespace","message":"Some runc 1.2 releases fail to launch containers in some Pods where shareProcessNamespace is explicitly set true.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.19.9","to":"4.19.18"},{"from":"4.19.7","to":"4.19.18"},{"from":"4.19.10","to":"4.19.18"},{"from":"4.19.11","to":"4.19.18"},{"from":"4.19.12","to":"4.19.18"},{"from":"4.19.0","to":"4.19.18"},{"from":"4.19.3","to":"4.19.18"},{"from":"4.19.2","to":"4.19.18"},{"from":"4.19.5","to":"4.19.18"},{"from":"4.19.4","to":"4.19.18"},{"from":"4.19.6","to":"4.19.18"},{"from":"4.19.1","to":"4.19.18"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3765","name":"SCOSBootImage","message":"The coreos-bootimages ConfigMap in the openshift-machine-config-operator Namespace thrashes between RHCOS and SCOS 
content.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.21","to":"4.19.11"},{"from":"4.18.19","to":"4.19.10"},{"from":"4.18.20","to":"4.19.10"},{"from":"4.18.19","to":"4.19.11"},{"from":"4.18.20","to":"4.19.11"},{"from":"4.18.21","to":"4.19.10"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.24","to":"4.19.14"},{"from":"4.18.23","to":"4.19.14"},{"from":"4.18.25","to":"4.19.14"},{"from":"4.18.23","to":"4.19.15"},{"from":"4.18.25","to":"4.19.15"},{"from":"4.18.24","to":"4.19.15"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but 
the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.1","to":"4.19.14"},{"from":"4.19.5","to":"4.19.14"},{"from":"4.19.6","to":"4.19.15"},{"from":"4.19.6","to":"4.19.14"},{"from":"4.19.4","to":"4.19.14"},{"from":"4.19.7","to":"4.19.14"},{"from":"4.19.3","to":"4.19.14"},{"from":"4.19.0","to":"4.19.15"},{"from":"4.19.2","to":"4.19.14"},{"from":"4.19.7","to":"4.19.15"},{"from":"4.19.4","to":"4.19.15"},{"from":"4.19.2","to":"4.19.15"},{"from":"4.19.5","to":"4.19.15"},{"from":"4.19.3","to":"4.19.15"},{"from":"4.19.1","to":"4.19.15"},{"from":"4.19.0","to":"4.19.14"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.4","to":"4.18.5"},{"from":"4.18.1","to":"4.18.4"},{"from":"4.18.1","to":"4.18.5"},{"from":"4.18.2","to":"4.18.4"},{"from":"4.18.2","to":"4.18.3"},{"from":"4.18.2","to":"4.18.5"},{"from":"4.18.1","to":"4.18.3"},{"from":"4.18.3","to":"4.18.5"},{"from":"4.18.3","to":"4.18.4"},{"from":"4.18.1","to":"4.18.2"}],"risks":[{"url":"https://issues.redhat.com/browse/OCPNODE-3074","name":"CRIOLayerCompressionPulls","message":"The CRI-O container runtime may fail to pull images with certain layer compression 
characteristics","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.25","to":"4.19.16"},{"from":"4.18.24","to":"4.19.17"},{"from":"4.18.26","to":"4.19.17"},{"from":"4.18.22","to":"4.19.17"},{"from":"4.18.21","to":"4.19.17"},{"from":"4.18.23","to":"4.19.16"},{"from":"4.18.24","to":"4.19.16"},{"from":"4.18.23","to":"4.19.17"},{"from":"4.18.19","to":"4.19.17"},{"from":"4.18.25","to":"4.19.17"},{"from":"4.18.26","to":"4.19.16"},{"from":"4.18.20","to":"4.19.17"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  
group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.19.11","to":"4.19.13"},{"from":"4.19.12","to":"4.19.13"},{"from":"4.19.9","to":"4.19.13"},{"from":"4.19.10","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.3","to":"4.19.11"},{"from":"4.19.0","to":"4.19.11"},{"from":"4.19.1","to":"4.19.9"},{"from":"4.19.3","to":"4.19.9"},{"from":"4.19.0","to":"4.19.10"},{"from":"4.19.2","to":"4.19.9"},{"from":"4.19.0","to":"4.19.9"},{"from":"4.19.1","to":"4.19.10"},{"from":"4.19.2","to":"4.19.10"},{"from":"4.19.1","to":"4.19.11"},{"from":"4.19.2","to":"4.19.11"},{"from":"4.19.3","to":"4.19.10"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to 
an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.26","to":"4.19.18"},{"from":"4.18.25","to":"4.19.18"},{"from":"4.18.24","to":"4.19.18"},{"from":"4.18.21","to":"4.19.18"},{"from":"4.18.20","to":"4.19.18"},{"from":"4.18.22","to":"4.19.18"},{"from":"4.18.19","to":"4.19.18"},{"from":"4.18.23","to":"4.19.18"},{"from":"4.18.27","to":"4.19.18"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) 
(kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3765","name":"SCOSBootImage","message":"The coreos-bootimages ConfigMap in the openshift-machine-config-operator Namespace thrashes between RHCOS and SCOS content.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.19.0","to":"4.19.17"},{"from":"4.19.11","to":"4.19.16"},{"from":"4.19.3","to":"4.19.17"},{"from":"4.19.7","to":"4.19.17"},{"from":"4.19.1","to":"4.19.17"},{"from":"4.19.9","to":"4.19.16"},{"from":"4.19.11","to":"4.19.17"},{"from":"4.19.2","to":"4.19.17"},{"from":"4.19.12","to":"4.19.17"},{"from":"4.19.6","to":"4.19.17"},{"from":"4.19.4","to":"4.19.17"},{"from":"4.19.12","to":"4.19.16"},{"from":"4.19.10","to":"4.19.16"},{"from":"4.19.9","to":"4.19.17"},{"from":"4.19.5","to":"4.19.17"},{"from":"4.19.10","to":"4.19.17"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  
0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.19.5","to":"4.19.12"},{"from":"4.19.6","to":"4.19.12"},{"from":"4.19.4","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need 
that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.19","to":"4.19.3"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. 
See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1109","name":"HCPMetallbCNOCannotDeployFRRK8S","message":"On Hosted Control Plane (HCP/HyperShift) clusters with installed MetalLB operator, Cluster Network Operator fails to\ndeploy a critical component FRR-k8s when updated. MetalLB will stop working properly and stop advertising services,\nmaking them potentially unreachable from outside the cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\ngroup by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\nand on (_id) (\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n)\nor on (_id) (\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", \".*\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1110","name":"HCPServiceHealthCheckDisruption","message":"When Hosted Control Plane (HCP/HyperShift) clusters running on AWS update a node pool, the Services of type\nLoadBalancer may experience temporary availability disruption because health checks are not set up properly to monitor\nNode readiness state.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\n  or\n  0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\n* on (_id) group_left (type) (\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec 
might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degraded machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=~\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail to complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  
or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3357","name":"OldBootImagesComposeFSvsGrubProbe","message":"Upgrade to 4.19 will fail due to a boot image incompatibility issue if a cluster was born in 4.2 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  label_replace(group by (version) (cluster_version{_id=\"\",type=\"initial\",version=~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"yes, so possibly actually born in 4.2 or earlier\", \"\", \"\")\n  or\n  label_replace(0 * group by (version) (cluster_version{_id=\"\",type=\"initial\",version!~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"no, born in 4.10 or later\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.19.16","to":"4.20.0"},{"from":"4.19.17","to":"4.20.0"}],"risks":[{"url":"https://access.redhat.com/solutions/7133826","name":"ARO420UDRWorkerNodesFail","message":"Disconnected ARO clusters or clusters with a UDR 0.0.0.0/0 route definition are not able to add or replace nodes after an upgrade","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (_id, name) (cluster_operator_conditions{_id=\"\",name=\"aro\"})\n  or\n  0 * group by (_id, name) (cluster_operator_conditions{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.4","to":"4.19.13"},{"from":"4.19.5","to":"4.19.13"},{"from":"4.19.6","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the 
target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) 
(cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated 
to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) 
(kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.17","to":"4.19.18"},{"from":"4.19.13","to":"4.19.18"},{"from":"4.19.15","to":"4.19.18"},{"from":"4.19.14","to":"4.19.18"},{"from":"4.19.16","to":"4.19.18"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3765","name":"SCOSBootImage","message":"The coreos-bootimages ConfigMap in the openshift-machine-config-operator Namespace thrashes between RHCOS and SCOS content.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.10","to":"4.18.11"},{"from":"4.18.8","to":"4.18.10"},{"from":"4.18.9","to":"4.18.10"},{"from":"4.18.7","to":"4.18.11"},{"from":"4.18.9","to":"4.18.11"},{"from":"4.18.6","to":"4.18.10"},{"from":"4.18.7","to":"4.18.10"},{"from":"4.18.6","to":"4.18.11"},{"from":"4.18.8","to":"4.18.11"}],"risks":[{"url":"https://issues.redhat.com/browse/CNF-17689","name":"MetallbBgpBfdFrrRpm","message":"Clusters using MetalLB BFD capabilities alongside BGP can fail to establish BGP peering, reducing the availability of LoadBalancer services exposed by MetalLB, or even making them unreachable","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", 
\".*\")\n)\n"}}]}]},{"edges":[{"from":"4.18.11","to":"4.18.29"},{"from":"4.20.2","to":"4.20.6"},{"from":"4.20.3","to":"4.20.4"},{"from":"4.18.15","to":"4.18.29"},{"from":"4.18.13","to":"4.18.29"},{"from":"4.20.0","to":"4.20.6"},{"from":"4.18.22","to":"4.18.29"},{"from":"4.20.1","to":"4.20.4"},{"from":"4.18.2","to":"4.18.29"},{"from":"4.20.5","to":"4.20.6"},{"from":"4.20.3","to":"4.20.6"},{"from":"4.20.4","to":"4.20.6"},{"from":"4.20.2","to":"4.20.5"},{"from":"4.18.19","to":"4.18.29"},{"from":"4.20.3","to":"4.20.5"},{"from":"4.20.2","to":"4.20.4"},{"from":"4.18.25","to":"4.18.29"},{"from":"4.19.18","to":"4.19.19"},{"from":"4.18.20","to":"4.18.29"},{"from":"4.18.9","to":"4.18.29"},{"from":"4.20.1","to":"4.20.5"},{"from":"4.18.14","to":"4.18.29"},{"from":"4.18.12","to":"4.18.29"},{"from":"4.18.26","to":"4.18.29"},{"from":"4.20.0","to":"4.20.4"},{"from":"4.18.3","to":"4.18.29"},{"from":"4.18.16","to":"4.18.29"},{"from":"4.18.21","to":"4.18.29"},{"from":"4.18.24","to":"4.18.29"},{"from":"4.18.5","to":"4.18.29"},{"from":"4.18.1","to":"4.18.29"},{"from":"4.18.10","to":"4.18.29"},{"from":"4.18.27","to":"4.18.29"},{"from":"4.19.18","to":"4.19.20"},{"from":"4.19.19","to":"4.19.20"},{"from":"4.20.4","to":"4.20.5"},{"from":"4.18.8","to":"4.18.29"},{"from":"4.18.23","to":"4.18.29"},{"from":"4.18.28","to":"4.18.29"},{"from":"4.20.0","to":"4.20.5"},{"from":"4.18.4","to":"4.18.29"},{"from":"4.18.17","to":"4.18.29"},{"from":"4.20.1","to":"4.20.6"},{"from":"4.18.7","to":"4.18.29"},{"from":"4.18.18","to":"4.18.29"},{"from":"4.18.6","to":"4.18.29"}],"risks":[{"url":"https://issues.redhat.com/browse/RUN-3748","name":"RuncShareProcessNamespace","message":"Some runc 1.2 releases fail to launch containers in some Pods where shareProcessNamespace is explicitly set 
true.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.20","to":"4.19.9"},{"from":"4.18.19","to":"4.19.9"},{"from":"4.18.21","to":"4.19.9"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail to complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.19","to":"4.19.7"},{"from":"4.18.20","to":"4.19.7"},{"from":"4.18.21","to":"4.19.7"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degraded machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=~\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail to complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.4","to":"4.18.8"},{"from":"4.18.1","to":"4.18.7"},{"from":"4.18.5","to":"4.18.7"},{"from":"4.18.2","to":"4.18.8"},{"from":"4.18.3","to":"4.18.7"},{"from":"4.18.4","to":"4.18.7"},{"from":"4.18.5","to":"4.18.9"},{"from":"4.18.2","to":"4.18.7"},{"from":"4.18.4","to":"4.18.9"},{"from":"4.18.3","to":"4.18.8"},{"from":"4.18.3","to":"4.18.9"},{"from":"4.18.1","to":"4.18.8"},{"from":"4.18.1","to":"4.18.9"},{"from":"4.18.5","to":"4.18.8"},{"from":"4.18.2","to":"4.18.9"}],"risks":[{"url":"https://issues.redhat.com/browse/MCO-1702","name":"RHELFailedRebootMissingService","message":"RHEL worker nodes will fail to reboot during a node update due to a missing service.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\",label_node_openshift_io_os_id=\"rhel\"})\n  or\n  0 * group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.19.11"},{"from":"4.18.22","to":"4.19.10"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters 
with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]}]},{"edges":[{"from":"4.18.17","to":"4.18.24"},{"from":"4.18.3","to":"4.18.24"},{"from":"4.18.10","to":"4.18.24"},{"from":"4.18.14","to":"4.18.24"},{"from":"4.18.5","to":"4.18.24"},{"from":"4.18.7","to":"4.18.24"},{"from":"4.18.8","to":"4.18.24"},{"from":"4.18.9","to":"4.18.24"},{"from":"4.18.6","to":"4.18.24"},{"from":"4.18.16","to":"4.18.24"},{"from":"4.18.20","to":"4.18.24"},{"from":"4.18.4","to":"4.18.24"},{"from":"4.18.15","to":"4.18.24"},{"from":"4.18.1","to":"4.18.24"},{"from":"4.18.18","to":"4.18.24"},{"from":"4.18.19","to":"4.18.24"},{"from":"4.18.11","to":"4.18.24"},{"from":"4.18.2","to":"4.18.24"},{"from":"4.18.12","to":"4.18.24"},{"from":"4.18.21","to":"4.18.24"},{"from":"4.18.13","to":"4.18.24"}],"risks":[{"url":"https://issues.redhat.com/browse/COS-3700","name":"ContinuousNodeRebootingDueToKernelPanic","message":"OCP nodes get rebooted continuously due to kernel panic by loading of third party modules.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator 
metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.5","to":"4.18.6"},{"from":"4.18.3","to":"4.18.6"},{"from":"4.18.1","to":"4.18.6"},{"from":"4.18.4","to":"4.18.6"},{"from":"4.18.2","to":"4.18.6"}],"risks":[{"url":"https://issues.redhat.com/browse/OCPNODE-3074","name":"CRIOLayerCompressionPulls","message":"The CRI-O container runtime may fail to pull images with certain layer compression characteristics","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/MCO-1702","name":"RHELFailedRebootMissingService","message":"RHEL worker nodes will fail to reboot during a node update due to a missing service.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\",label_node_openshift_io_os_id=\"rhel\"})\n  or\n  0 * group by (label_node_openshift_io_os_id) 
(kube_node_labels{_id=\"\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.23","to":"4.19.11"},{"from":"4.18.23","to":"4.19.10"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * 
group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]}]},{"edges":[{"from":"4.19.15","to":"4.20.13"},{"from":"4.19.15","to":"4.20.0"},{"from":"4.19.15","to":"4.20.3"},{"from":"4.19.15","to":"4.20.10"},{"from":"4.19.15","to":"4.20.11"},{"from":"4.19.17","to":"4.20.11"},{"from":"4.19.16","to":"4.20.10"},{"from":"4.19.17","to":"4.20.3"},{"from":"4.19.16","to":"4.20.11"},{"from":"4.19.16","to":"4.20.12"},{"from":"4.19.17","to":"4.20.1"},{"from":"4.19.15","to":"4.20.12"},{"from":"4.19.15","to":"4.20.1"},{"from":"4.19.17","to":"4.20.10"},{"from":"4.19.15","to":"4.20.8"},{"from":"4.19.16","to":"4.20.8"},{"from":"4.19.15","to":"4.20.2"},{"from":"4.19.16","to":"4.20.2"},{"from":"4.19.17","to":"4.20.13"},{"from":"4.19.17","to":"4.20.12"},{"from":"4.19.16","to":"4.20.3"},{"from":"4.19.16","to":"4.20.13"},{"from":"4.19.16","to":"4.20.1"},{"from":"4.19.17","to":"4.20.2"},{"from":"4.19.17","to":"4.20.8"}],"risks":[{"url":"https://access.redhat.com/solutions/7133826","name":"ARO420UDRWorkerNodesFail","message":"Disconnected ARO clusters or clusters with a UDR 0.0.0.0/0 route definition are not able to add or replace nodes after an upgrade","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (_id, name) (cluster_operator_conditions{_id=\"\",name=\"aro\"})\n  or\n  0 * group by (_id, name) (cluster_operator_conditions{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.26","to":"4.19.27"},{"from":"4.18.24","to":"4.19.27"},{"from":"4.18.28","to":"4.19.27"},{"from":"4.18.23","to":"4.19.28"},{"from":"4.18.27","to":"4.19.27"},{"from":"4.18.31","to":"4.19.27"},{"from":"4.18.27","to":"4.19.28"},{"from":"4.18.32","to":"4.19.28"},{"from":"4.18.20","to":"4.19.28"},{"from":"4.18.33","to":"4.19.28"},{"from":"4.18.19","to":"4.19.28"},{"from":"4.18.19","to":"4.19.27"},{"from":"4.18.35","to":"4.19.28"},{"from":"4.18.26","to":"4.19.28"},{"from":"4.18.30","to":"4.19.27"},{"from":"4.18.36","to":"4.19.27"},{"from":"4.18.25","to":"4.19.27"},{"from":"4.18.35","to":"4.19.27"},{"from":"4.18.30","to":"4.19.28"},{"from":"4.18.25","to":"4.19.28"},{"from":"4.18.24","to":"4.19.28"},{"from":"4.18.22","to":"4.19.28"},{"from":"4.18.23","to":"4.19.27"},{"from":"4.18.33","to":"4.19.27"},{"from":"4.18.34","to":"4.19.28"},{"from":"4.18.37","to":"4.19.28"},{"from":"4.18.22","to":"4.19.27"},{"from":"4.18.36","to":"4.19.28"},{"from":"4.18.29","to":"4.19.28"},{"from":"4.18.21","to":"4.19.28"},{"from":"4.18.34","to":"4.19.27"},{"from":"4.18.20","to":"4.19.27"},{"from":"4.18.28","to":"4.19.28"},{"from":"4.18.21","to":"4.19.27"},{"from":"4.18.29","to":"4.19.27"},{"from":"4.18.32","to":"4.19.27"},{"from":"4.18.31","to":"4.19.28"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://redhat.atlassian.net/browse/CORENET-6950","name":"PrecisionTimeProtocolDPLLPins","message":"Clusters using older PTP operators may struggle to synchronize system clocks and might not provide time to downstream clients.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, name) (csv_succeeded{_id=\"\", name=~\"ptp-operator[.]v4[.][0-9]*[.]0-(202[3-5]|20260[1-9])[0-9]*\"})\nor on (_id)\n0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"ptp-operator.v4.y.0-20260331... 
or older not installed\", \"\", \"\")\n"}}]}]},{"edges":[{"from":"4.19.11","to":"4.19.12"},{"from":"4.19.10","to":"4.19.12"},{"from":"4.19.9","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.9","to":"4.18.12"},{"from":"4.18.7","to":"4.18.17"},{"from":"4.18.8","to":"4.18.17"},{"from":"4.18.5","to":"4.18.17"},{"from":"4.18.10","to":"4.18.16"},{"from":"4.18.7","to":"4.18.16"},{"from":"4.18.6","to":"4.18.17"},{"from":"4.18.10","to":"4.18.15"},{"from":"4.18.10","to":"4.18.13"},{"from":"4.18.11","to":"4.18.16"},{"from":"4.18.7","to":"4.18.13"},{"from":"4.18.10","to":"4.18.12"},{"from":"4.18.9","to":"4.18.14"},{"from":"4.18.6","to":"4.18.12"},{"from":"4.18.11","to":"4.18.17"},{"from":"4.18.8","to":"4.18.14"},{"from":"4.18.11","to":"4.18.12"},{"from":"4.18.6","to":"4.18.13"},{"from":"4.18.8","to":"4.18.12"},{"from":"4.18.9","to":"4.18.15"},{"from":"4.18.6","to":"4.18.15"},{"from":"4.18.7","to":"4.18.15"},{"from":"4.18.9","to":"4.18.17"},{"from":"4.18.3","to":"4.18.17"},{"from":"4.18.10","to":"4.18.14"},{"from":"4.18.10","to":"4.18.17"},{"from":"4.18.7","to":"4.18.14"},{"from":"4.18.6","to":"4.18.14"},{"from":"4.18.11","to":"4.18.13"},{"from":"4.18.9","to":"4.18.16"},{"from":"4.18.4","to":"4.18.17"},{"from":"4.18.6","to":"4.18.16"},{"from":"4.18.8","to":"4.18.15"},{"from":"4.18.7","to":"4.18.12"},{"from":"4.18.8","to":"4.18.13"},{"from":"4.18.11","to":"4.18.15"},{"from":"4.18.8","to":"4.18.16"},{"from":"4.18.9","to":"4.18.13"},{"from":"4.18.11","to":"4.18.14"},{"from":"4.18.2","to":"4.18.17"},{"from":"4.18.1","to":"4.18.17"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4632","name":"ConsoleEnabledTargetDownAlert","message":"The alert TargetDown is triggered if the capability Console is enabled on the 
cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"max(cluster_version_capability{name=\"Console\"})"}}]}]},{"edges":[{"from":"4.19.20","to":"4.20.6"},{"from":"4.19.19","to":"4.20.6"},{"from":"4.19.19","to":"4.20.5"},{"from":"4.19.18","to":"4.20.4"},{"from":"4.19.18","to":"4.20.5"},{"from":"4.19.18","to":"4.20.6"}],"risks":[{"url":"https://access.redhat.com/solutions/7133826","name":"ARO420UDRWorkerNodesFail","message":"Disconnected ARO clusters or clusters with a UDR 0.0.0.0/0 route definition are not able to add or replace nodes after an upgrade","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (_id, name) (cluster_operator_conditions{_id=\"\",name=\"aro\"})\n  or\n  0 * group by (_id, name) (cluster_operator_conditions{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/RUN-3748","name":"RuncShareProcessNamespace","message":"Some runc 1.2 releases fail to launch containers in some Pods where shareProcessNamespace is explicitly set true.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.20","to":"4.19.5"},{"from":"4.18.21","to":"4.19.6"},{"from":"4.18.19","to":"4.19.5"},{"from":"4.18.20","to":"4.19.6"},{"from":"4.18.19","to":"4.19.6"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. 
See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degraded machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) 
(cluster_infrastructure_provider{_id=\"\",type=~\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail to complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]}]},{"edges":[{"from":"4.19.1","to":"4.19.5"},{"from":"4.19.2","to":"4.19.4"},{"from":"4.19.3","to":"4.19.6"},{"from":"4.19.0","to":"4.19.4"},{"from":"4.19.3","to":"4.19.4"},{"from":"4.19.1","to":"4.19.6"},{"from":"4.19.1","to":"4.19.4"},{"from":"4.19.3","to":"4.19.5"},{"from":"4.19.0","to":"4.19.6"},{"from":"4.19.0","to":"4.19.5"},{"from":"4.19.2","to":"4.19.6"},{"from":"4.19.2","to":"4.19.5"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.19.9"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource 
references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail to complete due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) 
(kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.23","to":"4.19.13"},{"from":"4.18.24","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) 
(kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.7","to":"4.19.20"},{"from":"4.19.16","to":"4.19.19"},{"from":"4.19.15","to":"4.19.19"},{"from":"4.19.3","to":"4.19.20"},{"from":"4.19.10","to":"4.19.20"},{"from":"4.19.5","to":"4.19.20"},{"from":"4.19.10","to":"4.19.19"},{"from":"4.19.9","to":"4.19.19"},{"from":"4.19.7","to":"4.19.19"},{"from":"4.19.11","to":"4.19.19"},{"from":"4.19.6","to":"4.19.19"},{"from":"4.19.15","to":"4.19.20"},{"from":"4.19.0","to":"4.19.19"},{"from":"4.19.4","to":"4.19.19"},{"from":"4.19.1","to":"4.19.19"},{"from":"4.19.2","to":"4.19.19"},{"from":"4.19.13","to":"4.19.19"},{"from":"4.19.3","to":"4.19.19"},{"from":"4.19.14","to":"4.19.19"},{"from":"4.19.16","to":"4.19.20"},{"from":"4.19.13","to":"4.19.20"},{"from":"4.19.0","to":"4.19.20"},{"from":"4.19.11","to":"4.19.20"},{"from":"4.19.17","to":"4.19.19"},{"from":"4.19.4","to":"4.19.20"},{"from":"4.19.12","to":"4.19.19"},{"from":"4.19.1","to":"4.19.20"},{"from":"4.19.5","to":"4.19.19"},{"from":"4.19.17","to":"4.19.20"},{"from":"4.19.9","to":"4.19.20"},{"from":"4.19.6","to":"4.19.20"},{"from":"4.19.12","to":"4.19.20"},{"from":"4.19.14","to":"4.19.20"},{"from":"4.19.2","to":"4.19.20"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/RUN-3748","name":"RuncShareProcessNamespace","message":"Some runc 1.2 releases fail to launch containers in some Pods where shareProcessNamespace is explicitly set true.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.19.16","to":"4.20.5"},{"from":"4.19.16","to":"4.20.4"},{"from":"4.19.17","to":"4.20.5"},{"from":"4.19.16","to":"4.20.6"},{"from":"4.19.15","to":"4.20.4"},{"from":"4.19.17","to":"4.20.6"},{"from":"4.19.15","to":"4.20.6"},{"from":"4.19.15","to":"4.20.5"},{"from":"4.19.17","to":"4.20.4"}],"risks":[{"url":"https://access.redhat.com/solutions/7133826","name":"ARO420UDRWorkerNodesFail","message":"Disconnected ARO clusters or clusters with a UDR 0.0.0.0/0 route definition are not able to add or replace nodes after an upgrade","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (_id, name) (cluster_operator_conditions{_id=\"\",name=\"aro\"})\n  or\n  0 * group by (_id, name) (cluster_operator_conditions{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/RUN-3748","name":"RuncShareProcessNamespace","message":"Some runc 1.2 releases fail to launch containers in some Pods where shareProcessNamespace is explicitly set true.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.18.23","to":"4.19.25"},{"from":"4.18.30","to":"4.19.25"},{"from":"4.18.25","to":"4.19.24"},{"from":"4.18.24","to":"4.19.24"},{"from":"4.18.32","to":"4.19.25"},{"from":"4.18.30","to":"4.19.23"},{"from":"4.18.21","to":"4.19.22"},{"from":"4.18.27","to":"4.19.25"},{"from":"4.18.29","to":"4.19.25"},{"from":"4.18.33","to":"4.19.25"},{"from":"4.18.24","to":"4.19.22"},{"from":"4.18.25","to":"4.19.25"},{"from":"4.18.22","to":"4.19.24"},{"from":"4.18.29","to":"4.19.23"},{"from":"4.18.23","to":"4.19.24"},{"from":"4.18.22","to":"4.19.25"},{"from":"4.18.27","to":"4.19.22"},{"from":"4.18.27","to":"4.19.23"},{"from":"4.18.31","to":"4.19.22"},{"from":"4.18.24","to":"4.19.21"},{"from":"4.18.28","to":"4.19.25"},{"from":"4.18.20","to":"4.19.23"},{"from":"4.18.27","to":"4.19.24"},{"from":"4.18.23","to":"4.19.21"},{"from":"4.18.30","to":"4.19.24"},{"from":"4.18.19","to":"4.19.21"},{"from":"4.18.34","to":"4.19.25"},{"from":"4.18.20","to":"4.19.22"},{"from":"4.18.22","to":"4.19.22"},{"from":"4.18.20","to":"4.19.24"},{"from":"4.18.22","to":"4.19.23"},{"from":"4.18.19","to":"4.19.22"},{"from":"4.18.25","to":"4.19.22"},{"from":"4.18.25","to":"4.19.23"},{"from":"4.18.19","to":"4.19.23"},{"from":"4.18.26","to":"4.19.23"},{"from":"4.18.32","to":"4.19.24"},{"from":"4.18.24","to":"4.19.25"},{"from":"4.18.29"
,"to":"4.19.21"},{"from":"4.18.33","to":"4.19.24"},{"from":"4.18.30","to":"4.19.22"},{"from":"4.18.22","to":"4.19.21"},{"from":"4.18.19","to":"4.19.25"},{"from":"4.18.21","to":"4.19.25"},{"from":"4.18.30","to":"4.19.21"},{"from":"4.18.31","to":"4.19.24"},{"from":"4.18.29","to":"4.19.24"},{"from":"4.18.31","to":"4.19.23"},{"from":"4.18.26","to":"4.19.24"},{"from":"4.18.21","to":"4.19.21"},{"from":"4.18.28","to":"4.19.23"},{"from":"4.18.27","to":"4.19.21"},{"from":"4.18.31","to":"4.19.25"},{"from":"4.18.29","to":"4.19.22"},{"from":"4.18.28","to":"4.19.22"},{"from":"4.18.26","to":"4.19.21"},{"from":"4.18.23","to":"4.19.22"},{"from":"4.18.26","to":"4.19.25"},{"from":"4.18.20","to":"4.19.25"},{"from":"4.18.20","to":"4.19.21"},{"from":"4.18.23","to":"4.19.23"},{"from":"4.18.21","to":"4.19.24"},{"from":"4.18.28","to":"4.19.24"},{"from":"4.18.28","to":"4.19.21"},{"from":"4.18.24","to":"4.19.23"},{"from":"4.18.25","to":"4.19.21"},{"from":"4.18.19","to":"4.19.24"},{"from":"4.18.21","to":"4.19.23"},{"from":"4.18.26","to":"4.19.22"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]}]},{"edges":[{"from":"4.19.13","to":"4.19.15"},{"from":"4.19.13","to":"4.19.14"},{"from":"4.19.14","to":"4.19.15"}],"risks":[{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.2","to":"4.19.13"},{"from":"4.19.3","to":"4.19.13"},{"from":"4.19.1","to":"4.19.13"},{"from":"4.19.0","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator 
metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed 
release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.19","to":"4.18.23"},{"from":"4.18.15","to":"4.18.23"},{"from":"4.18.8","to":"4.18.23"},{"from":"4.18.10","to":"4.18.23"},{"from":"4.18.14","to":"4.18.23"},{"from":"4.18.2","to":"4.18.23"},{"from":"4.18.5","to":"4.18.23"},{"from":"4.18.18","to":"4.18.23"},{"from":"4.18.13","to":"4.18.23"},{"from":"4.18.11","to":"4.18.23"},{"from":"4.18.1","to":"4.18.23"},{"from":"4.18.3","to":"4.18.23"},{"from":"4.18.4","to":"4.18.23"},{"from":"4.18.7","to":"4.18.23"},{"from":"4.18.20","to":"4.18.23"},{"from":"4.18.12","to":"4.18.23"},{"from":"4.18.17","to":"4.18.23"},{"from":"4.18.6","to":"4.18.23"},{"from":"4.18.21","to":"4.18.23"},{"from":"4.18.16","to":"4.18.23"},{"from":"4.18.9","to":"4.18.23"}],"risks":[{"url":"https://issues.redhat.com/browse/RUN-3446","name":"CrunConflictsWithNVIDIA","message":"Some crun 1.23 releases conflict with the NVIDIA GPU Operator over eBPF, causing issues with GPU workloads.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (name) (csv_succeeded{_id=\"\", name=~\"gpu-operator-certified[.].*\"})\nor on (_id)\n0 * group(csv_count{_id=\"\"})"}}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.3","to":"4.18.25"},{"from":"4.18.9","to":"4.18.25"},{"from":"4.18.11","to":"4.18.25"},{"from":"4.18.9","to":"4.18.26"},{"from":"4.18.22","to":"4.18.25"},{"from":"4.18.7","to":"4.18.26"},{"from":"4.18.22","to":"4.18.24"},{"from":"4.18.10","to":"4.18.26"},{"from":"4.18.20","to":"4.18.25"},{"from":"4.18.13","to":"4.18.25"},{"from":"4.18.17","to":"4.18.26"},{"from":"4.18.3","to":"4.18.26"},{"from":"4.18.17","to":"4.18.25"},{"from":"4.18.4","to":"4.18.25"},{"from":"4.18.20","to":"4.18.26"},{"from":"4.18.18","to":"4.18.25"},{"from":"4.18.11","to":"4.18.26"},{"from":"4.18.18","to":"4.18.26"},{"from":"4.18.6","to":"4.18.26"},{"from":"4.18.8","to":"4.18.26"},{"from":"4.18.2","to":"4.18.26"},{"from":"4.18.22","to":"4.18.26"},{"from":"4.18.1","to":"4.18.26"},{"from":"4.18.7","to":"4.18.25"},{"from":"4.18.14","to":"4.18.26"},{"from":"4.18.4","to":"4.18.26"},{"from":"4.18.19","to":"4.18.26"},{"from":"4.18.15","to":"4.18.25"},{"from":"4.18.12","to":"4.18.25"},{"from":"4.18.21","to":"4.18.25"},{"from":"4.18.15","to":"4.18.26"},{"from":"4.18.8","to":"4.18.25"},{"from":"4.18.5","to":"4.18.26"},{"from":"4.18.14","to":"4.18.25"},{"from":"4.18.16","to":"4.18.25"},{"from":"4.18.12","to":"4.18.26"},{"from":"4.18.1","to":"4.18.25"},{"from":"4.18.2","to":"4.18.25"},{"from":"4.18.16","to":"4.18.26"},{"from":"4.18.21","to":"4.18.26"},{"from":"4.18.5","to":"4.18.25"},{"from":"4.18.19","to":"4.18.25"},{"from":"4.18.10","to":"4.18.25"},{"from":"4.18.6","to":"
4.18.25"},{"from":"4.18.13","to":"4.18.26"}],"risks":[{"url":"https://issues.redhat.com/browse/COS-3700","name":"ContinuousNodeRebootingDueToKernelPanic","message":"OCP nodes get rebooted continuously due to kernel panic by loading of third party modules.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.20","to":"4.19.26"},{"from":"4.18.26","to":"4.19.26"},{"from":"4.18.27","to":"4.19.26"},{"from":"4.18.19","to":"4.19.26"},{"from":"4.18.23","to":"4.19.26"},{"from":"4.18.35","to":"4.19.26"},{"from":"4.18.22","to":"4.19.26"},{"from":"4.18.32","to":"4.19.26"},{"from":"4.18.31","to":"4.19.26"},{"from":"4.18.30","to":"4.19.26"},{"from":"4.18.29","to":"4.19.26"},{"from":"4.18.34","to":"4.19.26"},{"from":"4.18.25","to":"4.19.26"},{"from":"4.18.33","to":"4.19.26"},{"from":"4.18.21","to":"4.19.26"},{"from":"4.18.24","to":"4.19.26"},{"from":"4.18.28","to":"4.19.26"}],"risks":[{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]}]},{"edges":[{"from":"4.19.1","to":"4.19.16"},{"from":"4.19.4","to":"4.19.16"},{"from":"4.19.3","to":"4.19.16"},{"from":"4.19.2","to":"4.19.16"},{"from":"4.19.6","to":"4.19.16"},{"from":"4.19.5","to":"4.19.16"},{"from":"4.19.7","to":"4.19.16"},{"from":"4.19.0","to":"4.19.16"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left 
(name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.18.2","to":"4.18.10"},{"from":"4.18.5","to":"4.18.10"},{"from":"4.18.1","to":"4.18.10"},{"from":"4.18.1","to":"4.18.11"},{"from":"4.18.5","to":"4.18.11"},{"from":"4.18.3","to":"4.18.10"},{"from":"4.18.2","to":"4.18.11"},{"from":"4.18.4","to":"4.18.11"},{"from":"4.18.3","to":"4.18.11"},{"from":"4.18.4","to":"4.18.10"}],"risks":[{"url":"https://issues.redhat.com/browse/CNF-17689","name":"MetallbBgpBfdFrrRpm","message":"Clusters using MetalLB BFD capabilities alongside BGP can fail to establish BGP peering, reducing the availability of LoadBalancer services exposed by MetalLB, or even making them unreachable","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", \".*\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1702","name":"RHELFailedRebootMissingService","message":"RHEL worker nodes will fail to reboot during a node update due to a missing service.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (label_node_openshift_io_os_id) 
(kube_node_labels{_id=\"\",label_node_openshift_io_os_id=\"rhel\"})\n  or\n  0 * group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\"})\n)\n"}}]}]},{"edges":[{"from":"4.18.17","to":"4.19.3"},{"from":"4.18.16","to":"4.19.3"},{"from":"4.18.18","to":"4.19.3"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MON-4270","name":"AlertmanagerV1NotSupported","message":"Upgrade to OpenShift 4.19 will fail on cluster monitoring operator in case API version v1 of Alertmanager\nis still specified in the ConfigMaps \"cluster-monitoring-config\" or \"user-workload-monitoring-config\".","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1109","name":"HCPMetallbCNOCannotDeployFRRK8S","message":"On Hosted Control Plane (HCP/HyperShift) clusters with installed MetalLB operator, Cluster Network Operator fails to\ndeploy a critical component FRR-k8s when updated. 
MetalLB will stop working properly and stop advertising services,\nmaking them potentially unreachable from outside the cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\ngroup by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\nand on (_id) (\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n)\nor on (_id) (\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", \".*\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1110","name":"HCPServiceHealthCheckDisruption","message":"When Hosted Control Plane (HCP/HyperShift) clusters running on AWS update a node pool, the Services of type\nLoadBalancer may experience temporary availability disruption because health checks are not set up properly to monitor\nNode readiness state.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\n  or\n  0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\n* on (_id) group_left (type) (\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degrade machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=~\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3357","name":"OldBootImagesComposeFSvsGrubProbe","message":"Upgrade to 4.19 will fail due to a boot image incompatibility issue if a cluster was born in 4.2 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  label_replace(group by (version) (cluster_version{_id=\"\",type=\"initial\",version=~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"yes, so possibly actually born in 4.2 or earlier\", \"\", \"\")\n  or\n  label_replace(0 * group by (version) (cluster_version{_id=\"\",type=\"initial\",version!~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"no, born in 4.10 or later\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.18.23","to":"4.18.24"},{"from":"4.18.23","to":"4.18.25"},{"from":"4.18.23","to":"4.18.26"}],"risks":[{"url":"https://issues.redhat.com/browse/COS-3700","name":"ContinuousNodeRebootingDueToKernelPanic","message":"OCP nodes get rebooted continuously due to kernel panic by loading of third party 
modules.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.19.11","to":"4.19.24"},{"from":"4.19.11","to":"4.19.25"},{"from":"4.19.11","to":"4.19.22"},{"from":"4.19.9","to":"4.19.24"},{"from":"4.19.2","to":"4.19.25"},{"from":"4.19.10","to":"4.19.25"},{"from":"4.19.1","to":"4.19.22"},{"from":"4.19.0","to":"4.19.22"},{"from":"4.19.12","to":"4.19.24"},{"from":"4.19.1","to":"4.19.23"},{"from":"4.19.16","to":"4.19.21"},{"from":"4.19.6","to":"4.19.25"},{"from":"4.19.16","to":"4.19.22"},{"from":"4.19.6","to":"4.19.22"},{"from":"4.19.4","to":"4.19.24"},{"from":"4.19.0","to":"4.19.21"},{"from":"4.19.17","to":"4.19.23"},{"from":"4.19.4","to":"4.19.22"},{"from":"4.19.13","to":"4.19.25"},{"from":"4.19.7","to":"4.19.21"},{"from":"4.19.10","to":"4.19.22"},{"from":"4.19.5","to":"4.19.22"},{"from":"4.19.4","to":"4.19.23"},{"from":"4.19.12","to":"4.19.25"},{"from":"4.19.5","to":"4.19.25"},{"from":"4.19.14","to":"4.19.23"},{"from":"4.19.6","to":"4.19.23"},{"from":"4.19.15","to":"4.19.23"},{"from":"4.19.7","to":"4.19.22"},{"from":"4.19.17","to":"4.19.25"},{"from":"4.19.1","to":"4.19.24"},{"from":"4.19.17","to":"4.19.21"},{"from":"4.19.12","to":"4.19.22"},{"from":"4.19.2","to":"4.19.23"},{"from":"4.19.14","to":"4.19.22"},{"from":"4.19.7","to":"4.19.25"},{"from":"4.19.13","to":"4.19.21"},{"from":"4.19.2","to":"4.19.22"},{"from":"4.19.17","to":"4.20.14"},{"from":"4.19.9","to":"4.19.22"},{"from":"4.19.15","to":"4.19.24"},{"from":"4.19.1","to":"4.19.21"},{"from":"4.19.0","to":"4.19.25"},{"from":"4.19.7","to":"4.19.23"},{"from":"4.19.6","to":"4.19.21"},{"from":"4.19.1","to":"4.19.25"},{"from":"4.19.11","to":"4.19.23"},{"from":"4.19.10","to":"4.19.24"},{"from":"4.19.12","to":"4.19.21"},{"from":"4.19.16","to":"4.19.24"},{"from":"4.19.3","to":"4.19.23"},{"from":"4.19.9","to":"4.19.23"},{"from":"4.19.16","to":"4.20.15"},{"from":"4.19.11","to":"4.19.21"},{"from":"4.19.15","to":"4.19.25"},{"from":"4.19.3","to":"4.19.25"},{"from":"4.19.4","to":"4.19.25"},{"from":"4.19.5","to":"4.19.24"},
{"from":"4.19.15","to":"4.20.15"},{"from":"4.19.13","to":"4.19.22"},{"from":"4.19.14","to":"4.19.25"},{"from":"4.19.0","to":"4.19.23"},{"from":"4.19.10","to":"4.19.21"},{"from":"4.19.5","to":"4.19.21"},{"from":"4.19.15","to":"4.19.22"},{"from":"4.19.17","to":"4.20.15"},{"from":"4.19.12","to":"4.19.23"},{"from":"4.19.7","to":"4.19.24"},{"from":"4.19.4","to":"4.19.21"},{"from":"4.19.9","to":"4.19.21"},{"from":"4.19.16","to":"4.19.23"},{"from":"4.19.17","to":"4.19.22"},{"from":"4.19.14","to":"4.19.21"},{"from":"4.19.16","to":"4.19.25"},{"from":"4.19.9","to":"4.19.25"},{"from":"4.19.2","to":"4.19.21"},{"from":"4.19.6","to":"4.19.24"},{"from":"4.19.3","to":"4.19.24"},{"from":"4.19.15","to":"4.19.21"},{"from":"4.19.10","to":"4.19.23"},{"from":"4.19.0","to":"4.19.24"},{"from":"4.19.3","to":"4.19.21"},{"from":"4.19.16","to":"4.20.14"},{"from":"4.19.13","to":"4.19.23"},{"from":"4.19.5","to":"4.19.23"},{"from":"4.19.2","to":"4.19.24"},{"from":"4.19.14","to":"4.19.24"},{"from":"4.19.15","to":"4.20.14"},{"from":"4.19.13","to":"4.19.24"},{"from":"4.19.3","to":"4.19.22"},{"from":"4.19.17","to":"4.19.24"}],"risks":[{"url":"https://issues.redhat.com/browse/CNTRLPLANE-2254","name":"HyperShiftRedundantRouter","message":"Hosted/HyperShift clusters on bare metal or KubeVirt may fail to complete the update. 
This affects clusters where the HostedCluster has services (e.g., OAuthServer, Ignition) configured with 'servicePublishingStrategy.type: Route' and a route.hostname that is a subdomain of the management cluster's .apps domain.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.24","to":"4.19.12"},{"from":"4.18.23","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.7","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, 
invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", 
name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.19","to":"4.19.2"}],"risks":[{"url":"https://access.redhat.com/solutions/7128495","name":"AROMissingInternalLBSAN","message":"ARO clusters on 4.19 experience issues creating new Machines due to missing the Internal LB SAN in the certificate provisioned by MCO. See https://issues.redhat.com/browse/OCPBUGS-59780","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_operator_conditions{_id=\"\",name=\"aro\"})\nor\n0 * group(cluster_operator_conditions{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1109","name":"HCPMetallbCNOCannotDeployFRRK8S","message":"On Hosted Control Plane (HCP/HyperShift) clusters with installed MetalLB operator, Cluster Network Operator fails to\ndeploy a critical component FRR-k8s when updated. 
MetalLB will stop working properly and stop advertising services,\nmaking them potentially unreachable from outside the cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\ngroup by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\nand on (_id) (\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"metallb-operator[.].*\"})\n)\nor on (_id) (\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"metallb operator not installed\", \"name\", \".*\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1110","name":"HCPServiceHealthCheckDisruption","message":"When Hosted Control Plane (HCP/HyperShift) clusters running on AWS update a node pool, the Services of type\nLoadBalancer may experience temporary availability disruption because health checks are not set up properly to monitor\nNode readiness state.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\n  or\n  0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n)\n* on (_id) group_left (type) (\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. 
This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1834","name":"InvalidArchitectureValueOfMachinesetsAnnotation","message":"Degrade machine-config cluster operator blocks the cluster update if a GCP or AWS cluster has machinesets with \nmultiple labels embedded within their \"capacity.cluster-autoscaler.kubernetes.io/labels\" annotation.\n","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (type) (cluster_infrastructure_provider{_id=\"\",type=~\"AWS|GCP\"})\n  or\n  0 * group by (type) (cluster_infrastructure_provider{_id=\"\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/OCPCLOUD-3052","name":"NoCloudConfConfigMap","message":"Upgrade to 4.19 will fail due to an absent cloud-conf ConfigMap in AWS clusters born in 4.13 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"bottomk by (_id) (1,\n  0 * group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\",configmap=\"cloud-conf\"})\n  or\n  group by (_id, namespace, configmap) (kube_configmap_info{_id=\"\",namespace=\"openshift-cloud-controller-manager\"})\n)\n* on (_id) group_left (type)\ntopk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=\"AWS\"})\n  or\n  0 * group by (_id, type) 
(cluster_infrastructure_provider{_id=\"\",type!=\"AWS\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/COS-3357","name":"OldBootImagesComposeFSvsGrubProbe","message":"Upgrade to 4.19 will fail due to a boot image incompatibility issue if a cluster was born in 4.2 or earlier.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  label_replace(group by (version) (cluster_version{_id=\"\",type=\"initial\",version=~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"yes, so possibly actually born in 4.2 or earlier\", \"\", \"\")\n  or\n  label_replace(0 * group by (version) (cluster_version{_id=\"\",type=\"initial\",version!~\"4[.][0-9][.].*\"}),\"born_by_4_9\", \"no, born in 4.10 or later\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/STOR-2486","name":"VSphereStorageMountIssues","message":"vSphere customers using vSAN file volumes can't mount vSphere shared volumes and NFS volumes which server do not set NFS4ERR_ATTRNOTSUPP","matchingRules":[{"type":"PromQL","promql":{"promql":"group(cluster_infrastructure_provider{type=~\"VSphere|None\"})\nor\n0 * group(cluster_infrastructure_provider)\n"}}]}]},{"edges":[{"from":"4.18.19","to":"4.19.12"},{"from":"4.18.20","to":"4.19.12"},{"from":"4.18.21","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) 
(kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.21","to":"4.19.13"},{"from":"4.18.19","to":"4.19.13"},{"from":"4.18.20","to":"4.19.13"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured 
proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by 
(_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) (csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.3","to":"4.19.7"},{"from":"4.19.0","to":"4.19.7"},{"from":"4.19.2","to":"4.19.7"},{"from":"4.19.1","to":"4.19.7"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.19.22","to":"4.20.11"},{"from":"4.19.18","to":"4.20.8"},{"from":"4.19.19","to":"4.20.8"},{"from":"4.19.19","to":"4.20.11"},{"from":"4.19.20","to":"4.20.12"},{"from":"4.19.20","to":"4.20.8"},{"from":"4.19.18","to":"4.20.12"},{"from":"4.19.22","to":"4.20.12"},{"from":"4.19.21","to":"4.20.10"},{"from":"4.19.22","to":"4.20.13"},{"from":"4.19.21","to":"4.20.8"},{"from":"4.19.20","to":"4.20.10"},{"from":"4.19.18","to":"4.20.10"},{"from":"4.19.20","to":"4.20.13"},{"from":"4.19.20","to":"4.20.11"},{"from":"4.19.21","to":"4.20.13"},{"from":"4.19.19","to":"4.20.13"},{"from":"4.19.21","to":"4.20.11"},{"from":"4.19.23","to":"4.20.13"},{"from":"4.19.21","to":"4.20.12"},{"from":"4.19.18","to":"4.20.11"},{"from":"4.19.19","to":"4.20.12"},{"from":"4.19.18","to":"4.20.3"},{"from":"4.19.18","to":"4.20.13"},{"from":"4.19.19","to":"4.20.10"}],"risks":[{"url":"https://access.redhat.com/solutions/7133826","name":"ARO420UDRWorkerNodesFail","message":"Disconnected ARO clusters or clusters with a UDR 
0.0.0.0/0 route definition are not able to add or replace nodes after an upgrade","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (_id, name) (cluster_operator_conditions{_id=\"\",name=\"aro\"})\n  or\n  0 * group by (_id, name) (cluster_operator_conditions{_id=\"\"})\n)\n"}}]}]},{"edges":[{"from":"4.19.4","to":"4.19.7"},{"from":"4.19.6","to":"4.19.7"},{"from":"4.19.5","to":"4.19.7"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]}]},{"edges":[{"from":"4.19.0","to":"4.19.12"},{"from":"4.19.3","to":"4.19.12"},{"from":"4.19.2","to":"4.19.12"},{"from":"4.19.1","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4762","name":"ConsoleCrashOnMissingPlugin","message":"If a Console Operator configuration resource references a plugin name for which there is no corresponding ConsolePlugin resource, the Console may crashloop after the cluster is updated to an affected version.","matchingRules":[{"type":"Always"}]},{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CNTRLPLANE-1407","name":"HyperShiftProxyScheme","message":"Hosted/HyperShift clusters where HostedCluster has a configured proxy needed for IDP or ingress canary probes may lose the ability to login.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by 
(_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]}]},{"edges":[{"from":"4.18.22","to":"4.19.16"},{"from":"4.18.20","to":"4.19.16"},{"from":"4.18.21","to":"4.19.16"},{"from":"4.18.19","to":"4.19.16"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator 
metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6196","name":"IPsecLargeClusterConnectivity","message":"Large clusters with enabled IPsec might experience intermittent loss of pod-to-pod connectivity. This prevents some pods on certain nodes from reaching services on other nodes, resulting in connection timeouts.","matchingRules":[{"type":"PromQL","promql":{"promql":"(\n  group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"enabled\", \"\", \"\") == 1)\n  or on (_id)\n  0 * group by (ipsec) (label_replace(max_over_time(ovnkube_controller_ipsec_enabled{_id=\"\"}[1h]), \"ipsec\", \"disabled\", \"\", \"\") == 0)\n) and on (_id) (\n  group by (resource) (max_over_time(apiserver_storage_objects{_id=\"\",resource=\"nodes\"}[1h]) > 120)\n)\nor on (_id)\n0 * group(max_over_time(apiserver_storage_objects{_id=\"\"}[1h]))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6483","name":"NetworkManagerOVNBridgeMapping","message":"On some clusters, the NetworkManager may delete ovs-ports on RHCOS updates, breaking Kubernetes access to those Nodes and wedging the update into the exposed release.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal|OpenStack|VSphere\"})\n  or on (_id)\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"})\n)\n* on (_id) group_left (name)\n(\n  group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"} > 0)\n  or on (_id)\n  0 * group by (_id, name) (kubernetes_nmstate_features_applied{_id=\"\", name=\"ovn.bridge-mappings\"})\n  or on (_id)\n  group by (_id, name) 
(csv_succeeded{_id=\"\", name=~\"kubevirt-hyperconverged-operator[.].*\"})\n  or on (_id)\n  0 * label_replace(group by (_id) (csv_succeeded{_id=\"\"}), \"name\", \"hyperconverged operator not installed\", \"\", \"\")\n  or on (_id)\n  0 * label_replace(group by (_id) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal|OpenStack|VSphere\"}), \"name\", \"not sure about hyperconverged or ovn.bridge-mappings, but the whole platform is safe\", \"\", \"\")\n)\n"}}]}]},{"edges":[{"from":"4.18.3","to":"4.18.16"},{"from":"4.18.2","to":"4.18.16"},{"from":"4.18.3","to":"4.18.13"},{"from":"4.18.5","to":"4.18.16"},{"from":"4.18.2","to":"4.18.15"},{"from":"4.18.5","to":"4.18.15"},{"from":"4.18.1","to":"4.18.15"},{"from":"4.18.3","to":"4.18.12"},{"from":"4.18.2","to":"4.18.13"},{"from":"4.18.1","to":"4.18.13"},{"from":"4.18.1","to":"4.18.14"},{"from":"4.18.3","to":"4.18.15"},{"from":"4.18.2","to":"4.18.14"},{"from":"4.18.1","to":"4.18.16"},{"from":"4.18.3","to":"4.18.14"},{"from":"4.18.2","to":"4.18.12"},{"from":"4.18.1","to":"4.18.12"},{"from":"4.18.4","to":"4.18.15"},{"from":"4.18.5","to":"4.18.13"},{"from":"4.18.5","to":"4.18.12"},{"from":"4.18.5","to":"4.18.14"},{"from":"4.18.4","to":"4.18.16"},{"from":"4.18.4","to":"4.18.13"},{"from":"4.18.4","to":"4.18.14"},{"from":"4.18.4","to":"4.18.12"}],"risks":[{"url":"https://issues.redhat.com/browse/CONSOLE-4632","name":"ConsoleEnabledTargetDownAlert","message":"The alert TargetDown is triggered if the capability Console is enabled on the cluster.","matchingRules":[{"type":"PromQL","promql":{"promql":"max(cluster_version_capability{name=\"Console\"})"}}]},{"url":"https://issues.redhat.com/browse/MCO-1702","name":"RHELFailedRebootMissingService","message":"RHEL worker nodes will fail to reboot during a node update due to a missing service.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk(1,\n  group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\",label_node_openshift_io_os_id=\"rhel\"})\n  or\n  
0 * group by (label_node_openshift_io_os_id) (kube_node_labels{_id=\"\"})\n)\n"}}]}]},{"edges":[{"from":"4.19.7","to":"4.19.12"}],"risks":[{"url":"https://issues.redhat.com/browse/OTA-1705","name":"HyperShiftClusterVersionOperatorMetrics","message":"Hosted/HyperShift clusters in exposed releases will fail to scrape cluster-version operator metrics.","matchingRules":[{"type":"PromQL","promql":{"promql":"group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\n0 * group by (_id, invoker) (cluster_installer{_id=\"\"})\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1890","name":"MachineConfigNodesV1AlphaControlPlaneLabels","message":"Standalone clusters born in 4.11 or earlier whose control-plane nodes lack the control-plane role may need that role added to update to the target release.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (role) (kube_node_role{_id=\"\",role=\"control-plane\"})\nor on ()\n1 * group by (role) (kube_node_role{_id=\"\",role=\"master\"})\nor on ()\n0 * topk(1, count by (role) (kube_node_role{_id=\"\"}))\n"}}]},{"url":"https://issues.redhat.com/browse/CORENET-6419","name":"NMStateServiceFailure","message":"The NMState service can fail on baremetal cluster nodes, causing node scaleups and re-deployment failures.","matchingRules":[{"type":"PromQL","promql":{"promql":"topk by (_id) (1,\n  group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type=~\"None|BareMetal\"})\n  or\n  0 * group by (_id, type) (cluster_infrastructure_provider{_id=\"\",type!~\"None|BareMetal\"})\n)\n"}}]},{"url":"https://issues.redhat.com/browse/MCO-1896","name":"OSUpdateFailureDueToImagePullPolicy","message":"Clusters with restrictive image policies may struggle with OS updates when the OS image is already on the local disk.","matchingRules":[{"type":"PromQL","promql":{"promql":"0 * group by (_id, invoker) (cluster_installer{_id=\"\",invoker=\"hypershift\"})\nor\ngroup by (_id, invoker) 
(cluster_installer{_id=\"\"})\n"}}]}]}]}