aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFabian Groffen <grobian@gentoo.org>2024-02-22 08:27:11 +0100
committerFabian Groffen <grobian@gentoo.org>2024-02-22 08:27:11 +0100
commit07e60cd2a4f67f0b4207fb8150f9d7a1689cb295 (patch)
tree4f659d4e960c4292bfbad6d98a87c684100a686d
parentbuildsys: avoid overuse of hprefixify (diff)
parentNEWS, meson.build: prepare for portage-3.0.62 (diff)
downloadportage-07e60cd2a4f67f0b4207fb8150f9d7a1689cb295.tar.gz
portage-07e60cd2a4f67f0b4207fb8150f9d7a1689cb295.tar.bz2
portage-07e60cd2a4f67f0b4207fb8150f9d7a1689cb295.zip
Merge remote-tracking branch 'origin/master' into prefix
Signed-off-by: Fabian Groffen <grobian@gentoo.org>
-rw-r--r--.git-blame-ignore-revs2
-rw-r--r--.github/workflows/ci.yml47
-rw-r--r--.github/workflows/lint.yml2
-rw-r--r--.pre-commit-config.yaml8
-rw-r--r--NEWS173
-rwxr-xr-xbin/dispatch-conf2
-rwxr-xr-xbin/ebuild-helpers/dohtml2
-rwxr-xr-xbin/ebuild-helpers/fowners2
-rwxr-xr-xbin/ebuild-helpers/fperms2
-rwxr-xr-xbin/ebuild.sh8
-rwxr-xr-xbin/estrip35
-rwxr-xr-xbin/fixpackages71
-rw-r--r--bin/install-qa-check.d/05prefix10
-rw-r--r--bin/install-qa-check.d/60bash-completion4
-rw-r--r--bin/install-qa-check.d/90bad-bin-group-write2
-rw-r--r--bin/install-qa-check.d/90bad-bin-owner2
-rw-r--r--bin/install-qa-check.d/90cmake-warnings2
-rw-r--r--bin/install-qa-check.d/90world-writable2
-rw-r--r--bin/install-qa-check.d/95empty-dirs2
-rw-r--r--bin/phase-functions.sh3
-rw-r--r--bin/phase-helpers.sh10
-rwxr-xr-xbin/quickpkg13
-rw-r--r--cnf/make.conf.example.arc.diff46
-rw-r--r--cnf/make.globals10
-rw-r--r--lib/_emerge/AbstractDepPriority.py3
-rw-r--r--lib/_emerge/AsynchronousTask.py6
-rw-r--r--lib/_emerge/Binpkg.py34
-rw-r--r--lib/_emerge/BinpkgFetcher.py120
-rw-r--r--lib/_emerge/BinpkgPrefetcher.py36
-rw-r--r--lib/_emerge/BinpkgVerifier.py22
-rw-r--r--lib/_emerge/DepPriority.py4
-rw-r--r--lib/_emerge/DepPriorityNormalRange.py4
-rw-r--r--lib/_emerge/DepPrioritySatisfiedRange.py1
-rw-r--r--lib/_emerge/EbuildBinpkg.py37
-rw-r--r--lib/_emerge/EbuildBuild.py39
-rw-r--r--lib/_emerge/EbuildFetchonly.py10
-rw-r--r--lib/_emerge/EbuildMetadataPhase.py52
-rw-r--r--lib/_emerge/EbuildPhase.py29
-rw-r--r--lib/_emerge/MergeListItem.py1
-rw-r--r--lib/_emerge/MetadataRegen.py16
-rw-r--r--lib/_emerge/PipeReader.py1
-rw-r--r--lib/_emerge/Scheduler.py65
-rw-r--r--lib/_emerge/SpawnProcess.py50
-rw-r--r--lib/_emerge/SubProcess.py28
-rw-r--r--lib/_emerge/UnmergeDepPriority.py38
-rw-r--r--lib/_emerge/actions.py51
-rw-r--r--lib/_emerge/depgraph.py238
-rw-r--r--lib/_emerge/resolver/circular_dependency.py21
-rw-r--r--lib/_emerge/resolver/slot_collision.py12
-rw-r--r--lib/portage/_compat_upgrade/binpkg_format.py51
-rw-r--r--lib/portage/_compat_upgrade/meson.build1
-rw-r--r--lib/portage/_emirrordist/DeletionIterator.py8
-rw-r--r--lib/portage/_emirrordist/FetchIterator.py18
-rw-r--r--lib/portage/_global_updates.py15
-rw-r--r--lib/portage/_selinux.py17
-rw-r--r--lib/portage/_sets/dbapi.py2
-rw-r--r--lib/portage/_sets/libs.py1
-rw-r--r--lib/portage/binpkg.py2
-rw-r--r--lib/portage/cache/anydbm.py17
-rw-r--r--lib/portage/const.py4
-rw-r--r--lib/portage/dbapi/__init__.py14
-rw-r--r--lib/portage/dbapi/bintree.py187
-rw-r--r--lib/portage/dbapi/porttree.py130
-rw-r--r--lib/portage/dbapi/vartree.py51
-rw-r--r--lib/portage/dep/__init__.py24
-rw-r--r--lib/portage/dep/_slot_operator.py1
-rw-r--r--lib/portage/dep/dep_check.py17
-rw-r--r--lib/portage/dep/libc.py83
-rw-r--r--lib/portage/dep/meson.build1
-rw-r--r--lib/portage/dep/soname/multilib_category.py10
-rw-r--r--lib/portage/emaint/main.py2
-rw-r--r--lib/portage/emaint/modules/merges/merges.py4
-rw-r--r--lib/portage/exception.py10
-rw-r--r--lib/portage/gpg.py14
-rw-r--r--lib/portage/gpkg.py34
-rw-r--r--lib/portage/output.py15
-rw-r--r--lib/portage/package/ebuild/config.py67
-rw-r--r--lib/portage/package/ebuild/doebuild.py214
-rw-r--r--lib/portage/package/ebuild/fetch.py1
-rw-r--r--lib/portage/process.py766
-rw-r--r--lib/portage/proxy/objectproxy.py1
-rw-r--r--lib/portage/sync/modules/git/git.py32
-rw-r--r--lib/portage/sync/modules/rsync/rsync.py40
-rw-r--r--lib/portage/tests/__init__.py12
-rw-r--r--lib/portage/tests/bin/setup_env.py7
-rw-r--r--lib/portage/tests/dbapi/test_auxdb.py71
-rw-r--r--lib/portage/tests/dbapi/test_portdb_cache.py6
-rw-r--r--lib/portage/tests/dep/meson.build1
-rw-r--r--lib/portage/tests/dep/test_libc.py81
-rw-r--r--lib/portage/tests/dep/test_overlap_dnf.py49
-rw-r--r--lib/portage/tests/ebuild/test_fetch.py24
-rw-r--r--lib/portage/tests/emerge/conftest.py21
-rw-r--r--lib/portage/tests/emerge/meson.build2
-rw-r--r--lib/portage/tests/emerge/test_actions.py23
-rw-r--r--lib/portage/tests/emerge/test_baseline.py2
-rw-r--r--lib/portage/tests/emerge/test_binpkg_fetch.py226
-rw-r--r--lib/portage/tests/emerge/test_config_protect.py2
-rw-r--r--lib/portage/tests/emerge/test_emerge_blocker_file_collision.py2
-rw-r--r--lib/portage/tests/emerge/test_emerge_slot_abi.py2
-rw-r--r--lib/portage/tests/emerge/test_libc_dep_inject.py552
-rw-r--r--lib/portage/tests/env/config/test_PortageModulesFile.py3
-rw-r--r--lib/portage/tests/glsa/test_security_set.py4
-rw-r--r--lib/portage/tests/gpkg/test_gpkg_gpg.py23
-rw-r--r--lib/portage/tests/gpkg/test_gpkg_metadata_update.py2
-rw-r--r--lib/portage/tests/gpkg/test_gpkg_metadata_url.py45
-rw-r--r--lib/portage/tests/gpkg/test_gpkg_path.py3
-rw-r--r--lib/portage/tests/locks/test_lock_nonblock.py55
-rw-r--r--lib/portage/tests/news/test_NewsItem.py3
-rw-r--r--lib/portage/tests/process/meson.build1
-rw-r--r--lib/portage/tests/process/test_AsyncFunction.py27
-rw-r--r--lib/portage/tests/process/test_spawn_fail_e2big.py7
-rw-r--r--lib/portage/tests/process/test_spawn_returnproc.py39
-rw-r--r--lib/portage/tests/resolver/ResolverPlayground.py96
-rw-r--r--lib/portage/tests/resolver/meson.build2
-rw-r--r--lib/portage/tests/resolver/soname/test_skip_update.py17
-rw-r--r--lib/portage/tests/resolver/test_broken_deps.py76
-rw-r--r--lib/portage/tests/resolver/test_cross_dep_priority.py164
-rw-r--r--lib/portage/tests/resolver/test_depclean_order.py117
-rw-r--r--lib/portage/tests/resolver/test_eapi.py4
-rw-r--r--lib/portage/tests/sets/base/test_variable_set.py8
-rw-r--r--lib/portage/tests/sets/files/test_config_file_set.py3
-rw-r--r--lib/portage/tests/sets/files/test_static_file_set.py3
-rw-r--r--lib/portage/tests/sets/shell/test_shell.py4
-rw-r--r--lib/portage/tests/sync/test_sync_local.py2
-rw-r--r--lib/portage/tests/update/test_move_ent.py205
-rw-r--r--lib/portage/tests/update/test_move_slot_ent.py157
-rw-r--r--lib/portage/tests/update/test_update_dbentry.py217
-rw-r--r--lib/portage/tests/util/dyn_libs/meson.build1
-rw-r--r--lib/portage/tests/util/dyn_libs/test_installed_dynlibs.py65
-rw-r--r--lib/portage/tests/util/futures/asyncio/meson.build1
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_child_watcher.py50
-rw-r--r--lib/portage/tests/util/futures/test_retry.py43
-rw-r--r--lib/portage/tests/util/test_manifest.py7
-rw-r--r--lib/portage/util/_async/AsyncTaskFuture.py8
-rw-r--r--lib/portage/util/_async/BuildLogger.py39
-rw-r--r--lib/portage/util/_async/ForkProcess.py191
-rw-r--r--lib/portage/util/_async/PipeLogger.py1
-rw-r--r--lib/portage/util/_async/PopenProcess.py5
-rw-r--r--lib/portage/util/_async/TaskScheduler.py1
-rw-r--r--lib/portage/util/_dyn_libs/LinkageMapELF.py3
-rw-r--r--lib/portage/util/_dyn_libs/dyn_libs.py43
-rw-r--r--lib/portage/util/elf/constants.py5
-rw-r--r--lib/portage/util/file_copy/__init__.py7
-rw-r--r--lib/portage/util/futures/_asyncio/__init__.py32
-rw-r--r--lib/portage/util/futures/_sync_decorator.py8
-rw-r--r--lib/portage/util/futures/executor/fork.py6
-rw-r--r--lib/portage/util/locale.py73
-rw-r--r--lib/portage/util/socks5.py36
-rw-r--r--man/ebuild.58
-rw-r--r--man/emerge.15
-rw-r--r--man/make.conf.530
-rw-r--r--meson.build2
-rwxr-xr-xmisc/emerge-delta-webrsync1
-rw-r--r--tox.ini3
154 files changed, 5024 insertions, 1252 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 2bbac54e6..e33a55c67 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -12,3 +12,5 @@ cb8eda531dad92bec9293c89f93db9b581245fd3
a83507be7ce04d3ac421f9cbe8b63816809b0f4e
# */*: rerun black w/ 23.1.0
4ceb199aab8035fdf2ebd244e213ca63c29b4d5f
+# */*: rerun black w/ 24.1.0
+3d55e159c473075c7b2f87c92293b0df6fa57563
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index eedbe2f6e..e4168203d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,12 +11,27 @@ jobs:
runs-on: ubuntu-22.04
strategy:
matrix:
+ start-method:
+ - 'fork'
+ - 'spawn'
python-version:
- '3.9'
- '3.10'
- '3.11'
- '3.12-dev'
+ - '3.13-dev'
- 'pypy-3.10'
+ exclude:
+ - python-version: '3.9'
+ start-method: 'spawn'
+ - python-version: '3.10'
+ start-method: 'spawn'
+ - python-version: '3.11'
+ start-method: 'spawn'
+ - python-version: '3.13-dev'
+ start-method: 'spawn'
+ - python-version: 'pypy-3.10'
+ start-method: 'spawn'
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -29,7 +44,7 @@ jobs:
set -xe
echo "force-unsafe-io" | sudo tee /etc/dpkg/dpkg.cfg.d/force-unsafe-io
sudo apt-get update -q
- sudo apt-get install -qy --no-install-recommends libxslt-dev libxml2-dev libxml2-utils meson zstd
+ sudo apt-get install -qy --no-install-recommends libxslt-dev libxml2-dev libxml2-utils meson pax-utils zstd
# Patch Ubuntu's old Meson to fix pypy-3.9 detection.
curl -s -f https://github.com/mesonbuild/meson/commit/2540ad6e9e08370ddd0b6753fdc9314945a672f0.patch | sudo patch -d /usr/lib/python3/dist-packages -p1 --no-backup-if-mismatch
@@ -38,10 +53,35 @@ jobs:
python -m site
python -m pip install --upgrade pip
# setuptools needed for 3.12+ because of https://github.com/mesonbuild/meson/issues/7702.
- python -m pip install pytest pytest-xdist setuptools
+ python -m pip install pytest pytest-rerunfailures pytest-xdist setuptools
# symlink /bin/true to /usr/bin/getuto (or do we want to grab the script from github?)
sudo ln -s /bin/true /usr/bin/getuto
+ - name: Patch python scripts to set spawn start method
+ if: ${{ matrix.start-method == 'spawn' }}
+ run: |
+ IFS=''
+ while read -r bin_file; do
+ if [[ $(head -n1 "${bin_file}") == '#!/usr/bin/env python' ]]; then
+ mode=top
+ while read -r line; do
+ if [[ ${mode} == top ]]; then
+ if [[ ${line} == \#* ]]; then
+ echo "${line}"
+ else
+ echo "import multiprocessing"
+ echo 'multiprocessing.set_start_method("spawn", force=True)'
+ echo "${line}"
+ mode=bottom
+ fi
+ else
+ echo "${line}"
+ fi
+ done < "${bin_file}" > "${bin_file}.new"
+ chmod +x "${bin_file}.new"
+ mv "${bin_file}"{.new,}
+ fi
+ done < <(find bin -maxdepth 1 -type f)
- name: Test meson install --destdir /tmp/install-root
run: |
echo -e "[binaries]\npython = '$(command -v python)'" > /tmp/native.ini
@@ -49,5 +89,8 @@ jobs:
meson install -C /tmp/build --destdir /tmp/install-root
- name: Run tests for ${{ matrix.python-version }}
run: |
+ [[ "${{ matrix.start-method }}" == "spawn" ]] && export PORTAGE_MULTIPROCESSING_START_METHOD=spawn
export PYTEST_ADDOPTS="-vv -ra -l -o console_output_style=count -n $(nproc) --dist=worksteal"
+ # Use pytest-rerunfailures to workaround pytest-xdist worker crashes with spawn start-method (bug 924416).
+ [[ "${{ matrix.start-method }}" == "spawn" ]] && PYTEST_ADDOPTS+=" --reruns 5 --only-rerun 'worker .* crashed while running'"
meson test -C /tmp/build --verbose
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 69b9578d4..2091796b7 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -13,7 +13,7 @@ jobs:
echo "::set-output name=missed::$(
find bin -type f -not -name '*.py' -not -name '*.sh' | \
xargs grep -l '#!/usr/bin/env python' | tr $'\n' ' ')"
- - uses: psf/black@stable
+ - uses: psf/black@24.1.1
with:
src: . ${{ steps.stragglers.outputs.missed }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 53a1d33aa..f6fca10cb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,21 +1,21 @@
---
repos:
- repo: https://github.com/psf/black
- rev: 23.7.0
+ rev: 24.1.1
hooks:
- id: black
- repo: https://github.com/PyCQA/pylint
- rev: v3.0.0a6
+ rev: v3.0.0a7
hooks:
- id: pylint
additional_dependencies:
- 'pytest'
- repo: https://github.com/ikamensh/flynt/
- rev: '1.0.0'
+ rev: '1.0.1'
hooks:
- id: flynt
- repo: https://github.com/asottile/pyupgrade
- rev: v3.9.0
+ rev: v3.12.0
hooks:
- id: pyupgrade
args: [--py39-plus]
diff --git a/NEWS b/NEWS
index ce9ed282b..3fbc72786 100644
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,179 @@ Release notes take the form of the following optional categories:
* Bug fixes
* Cleanups
+portage-3.0.62 (2024-02-22)
+--------------
+
+This release has a lot of misc. bug fixes as well as many asyncio fixes from Zac to
+kill off unsafe fork use.
+
+Features:
+* cnf: make.conf.example.arc: Add for the arc arch.
+
+* ELF: Add entries for ARC machines
+
+* bintree: Use urllib to parse PORTAGE_BINHOST / sync-uri so IPv6 works (bug #921400).
+
+* FEATURES: Add FEATURES="merge-wait", enabled by default, to control whether
+ we do parallel merges of images to the live filesystem (bug #663324).
+
+ If enabled, we serialize these merges.
+
+ For now, this makes FEATURES="parallel-install" a no-op, but in future, it
+ will be improved to allow parallel merges, just not while any packages are
+ compiling.
+
+* estrip: Add FEATURES="dedupdebug" (bug #906368).
+
+ This uses sys-devel/dwz to deduplicate installed DWARF debug information.
+
+ Future improvements may be made for multifile support.
+
+* Support PROPERTIES="test_privileged" to not drop privileges for tests (bug #924585).
+
+Bug fixes:
+* vartree: Make _needs_move() resilient to corrupt files on the live filesystem
+ (do a full copy/replace).
+
+* binpkgs: Handle SignatureException during package moves (bug #922142).
+
+* gpkg, bintree, binpkg: Add missing newlines to signed binpkg update notice.
+
+* emerge: Backtrack consistently regardless of --fetchonly (bug #922038).
+
+* emaint: Cleanup duplicate 'usage:' output.
+
+* installed_dynlibs: Resolve *.so symlinks (bug #921170).
+
+* bin: Consistently prefix eqawarn with 'QA Notice' (bug #728046).
+
+* _overlap_dnf: Deduplicate any-of blocks which improves performance (bug #891137).
+
+* tests: Handle larger PAGE_SIZE in environment size check (bug #923368).
+
+Cleanups:
+* tests: Add a job with multiprocessing set to the 'spawn' method.
+
+* tests: Fix missing playground cleanups.
+
+* process.spawn: Add abstraction for os.fork() to allow migrating callers to it.
+
+portage-3.0.61 (2024-01-05)
+--------------
+
+A small set of binpkg robustness fixes.
+
+Bug fixes:
+* BinpkgFetcher: Preserve mtime (bug #921208).
+
+* bintree: Don't call trust helper unless bindb is writable (bug #915842, bug #920180).
+
+* bintree: Avoid unnecessary build id incrementation (bug #921208).
+
+* bintree: Handle inject failures (bug #921327).
+
+* emaint: merges: Fix TrackingFile for utf8_mode (bug #921181).
+
+portage-3.0.60 (2024-01-02)
+--------------
+
+A small set of binpkg robustness fixes.
+
+Bug fixes:
+* EbuildBinpkg: Avoid crash with verification failure in some cases (bug #921089).
+
+* dbapi: Raise CorruptionKeyError - which is treated as a warning - during pkgmoves
+ to avoid falling over in the event of a corrupt Packages index (bug #920828).
+
+* EbuildFetchTestCase: Fix key order assumption (bug #921107).
+
+* gpkg: Fix basename handling via aux_update (in particular with FEATURES="binpkg-multi-instance")
+ (bug #920828).
+
+portage-3.0.59 (2023-12-27)
+--------------
+
+Features:
+* emerge: depclean now returns with failure if no packages are matched
+ (bug #917120).
+
+* bintree: Support 'file://' scheme for binhost sync-uri (bug #920537).
+
+* sync: git: Include signing key and git revision in log output with --verbose
+ or if a failure occurs.
+
+Bug fixes:
+* Scheduler: Handle aborted unpack more gracefully (bug #920258).
+
+* Scheduler: Handle SignatureException in _run_pkg_pretend to give a proper
+ error message if a signature could not be verified, rather than crashing out
+ (bug #920258).
+
+* ebuild: Improve 'command not found' regex for the dash shell (bug #822033).
+
+* ebuild: Do not skip 'command not found' QA warnings for configure (bug #245716).
+
+* emerge: Fix --binpkg-respect-use output optimization (was broken by refactoring
+ for ROOT support).
+
+* depgraph: Use strip_libc_deps in _eliminate_rebuilds (bug #915494).
+
+* depgraph: Handle IDEPEND circular dependencies for depclean (bug #916135).
+
+* bindbapi: Update Packages index when signed binpkgs are invalidated post-updates
+ (bug #920095).
+
+* _global_updates: Take a lock when performing updates (bug #587088, bug #920095).
+
+* make.conf(5): Fix note about binpkg extension for gpkg.
+
+* addread, addwrite, adddeny, addpredict: Warn about passing a colon-separated
+ list of paths as argument (bug #920654).
+
+portage-3.0.58 (2023-12-14)
+--------------
+
+Breaking changes:
+* Switch BINPKG_FORMAT to "gpkg" by default (bug #912672).
+
+portage-3.0.57 (2023-12-10)
+--------------
+
+This release has a series of binpkg robustness fixes. Notably, it includes
+a workaround/fix for binpkgs built against a newer glibc to ensure that they
+aren't merged on a binpkg consumer before merging a newer glibc, which has been
+a longstanding cause of frustration for binpkg users.
+
+Features:
+* emerge: Make --binpkg-changed-deps output far more concise (don't show
+ packages missing from the merge list).
+
+* ebuild: Inject implicit libc RDEPEND. This avoids "bricking" systems when
+ using binpkgs built against a newer glibc (bug #753500, bug #913628).
+
+* BinpkgVerifier: Improve error message on stale binpkg index. This became
+ more common with the portage-3.0.52 default change to FEATURES="pkgdir-index-trusted"
+ (bug #915474).
+
+Bug fixes:
+* Fix package moves for signed binary packages by deleting the old binpkg
+ if it cannot be re-signed, rather than crashing (bug #919419).
+
+* Set SYSROOT appropriately for best_version and has_version so that they work
+ when cross-compiling and IPC is disabled.
+
+* bindbapi: Add missing SIZE key for binpkg-multi-instance (bug #906675,
+ bug #918597, bug #919668).
+
+* resolver: Weaken circular dependency handling for cross-root (bug #919174).
+
+* BuildLogger: Avoid hang with -ipc builds (bug #919072).
+
+* soname dependency support: Fix crash with --ignore-soname-deps=n (bug #919311).
+
+* sets: VariableSet: Flatten dependencies so that e.g. www-client/firefox
+ appears in @rust-rebuild.
+
portage-3.0.56 (2023-12-01)
--------------
diff --git a/bin/dispatch-conf b/bin/dispatch-conf
index 849be562e..601110ce8 100755
--- a/bin/dispatch-conf
+++ b/bin/dispatch-conf
@@ -475,7 +475,7 @@ class dispatch:
try:
os.rename(newconf, curconf)
- except (OSError, os.error) as why:
+ except OSError as why:
writemsg(
f"dispatch-conf: Error renaming {newconf} to {curconf}: {str(why)}; fatal\n",
noiselevel=-1,
diff --git a/bin/ebuild-helpers/dohtml b/bin/ebuild-helpers/dohtml
index 5384eeb8b..b1636d6e8 100755
--- a/bin/ebuild-helpers/dohtml
+++ b/bin/ebuild-helpers/dohtml
@@ -10,7 +10,7 @@ if ! ___eapi_has_dohtml; then
fi
if ___eapi_has_dohtml_deprecated; then
- eqawarn "'${0##*/}' is deprecated in EAPI '${EAPI}'"
+ eqawarn "QA Notice: '${0##*/}' is deprecated in EAPI '${EAPI}'"
fi
# Use safe cwd, avoiding unsafe import for bug #469338.
diff --git a/bin/ebuild-helpers/fowners b/bin/ebuild-helpers/fowners
index fa4e3c0d7..d05094cab 100755
--- a/bin/ebuild-helpers/fowners
+++ b/bin/ebuild-helpers/fowners
@@ -21,7 +21,7 @@ for arg; do
args+=( "${ED%/}/${arg#/}" )
# remove the QA warning after 2024-12-31
if [[ ${arg:0:1} != / ]]; then
- eqawarn "${0##*/}: Path '${arg}' does not start with '/'."
+ eqawarn "QA Notice: ${0##*/}: Path '${arg}' does not start with '/'."
eqawarn "This is unsupported. Add a slash for a path in \${ED},"
eqawarn "or use 'chown' for a path relative to the working dir."
fi
diff --git a/bin/ebuild-helpers/fperms b/bin/ebuild-helpers/fperms
index d52f5a767..16772d11f 100755
--- a/bin/ebuild-helpers/fperms
+++ b/bin/ebuild-helpers/fperms
@@ -22,7 +22,7 @@ for arg; do
args+=( "${ED%/}/${arg#/}" )
# remove the QA warning after 2024-12-31
if [[ ${arg:0:1} != / ]]; then
- eqawarn "${0##*/}: Path '${arg}' does not start with '/'."
+ eqawarn "QA Notice: ${0##*/}: Path '${arg}' does not start with '/'."
eqawarn "This is unsupported. Add a slash for a path in \${ED},"
eqawarn "or use 'chmod' for a path relative to the working dir."
fi
diff --git a/bin/ebuild.sh b/bin/ebuild.sh
index dc8d205f9..c9f7c04e2 100755
--- a/bin/ebuild.sh
+++ b/bin/ebuild.sh
@@ -162,7 +162,10 @@ fi
__sb_append_var() {
local _v=$1 ; shift
local var="SANDBOX_${_v}"
- [[ -z $1 || -n $2 ]] && die "Usage: add$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${_v}") <colon-delimited list of paths>"
+ [[ $# -eq 1 ]] || die "Usage: add${_v,,} <path>"
+ # Make this fatal after 2024-12-31
+ [[ ${1} == *:* ]] \
+ && eqawarn "QA Notice: add${_v,,} called with colon-separated argument"
export ${var}="${!var:+${!var}:}$1"
}
# bash-4 version:
@@ -173,8 +176,9 @@ addwrite() { __sb_append_var WRITE "$@" ; }
adddeny() { __sb_append_var DENY "$@" ; }
addpredict() { __sb_append_var PREDICT "$@" ; }
+addread /
+addread "${PORTAGE_TMPDIR}/portage"
addwrite "${PORTAGE_TMPDIR}/portage"
-addread "/:${PORTAGE_TMPDIR}/portage"
[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"
# Avoid sandbox violations in temporary directories.
diff --git a/bin/estrip b/bin/estrip
index 3ac6a1692..2d9d50922 100755
--- a/bin/estrip
+++ b/bin/estrip
@@ -15,8 +15,8 @@ exp_tf() {
eval ${var}_${flag}=$(tf has ${flag} ${!var})
done
}
-exp_tf FEATURES compressdebug installsources nostrip splitdebug xattr
-exp_tf PORTAGE_RESTRICT binchecks installsources splitdebug strip
+exp_tf FEATURES compressdebug dedupdebug installsources nostrip splitdebug xattr
+exp_tf PORTAGE_RESTRICT binchecks dedupdebug installsources splitdebug strip
if ! ___eapi_has_prefix_variables; then
EPREFIX= ED=${D}
@@ -201,6 +201,10 @@ fi
[[ ${debugedit} ]] && debugedit_found=true || debugedit_found=false
debugedit_warned=false
+dwz=$(type -P dwz)
+[[ ${dwz} ]] && dwz_found=true || dwz_found=false
+dwz_warned=false
+
__multijob_init
# Setup ${T} filesystem layout that we care about.
@@ -248,6 +252,32 @@ __try_symlink() {
die "failed to create symlink '${name}'"
}
+# Usage: dedup_elf_debug <src> <inode_dedupdebug>
+dedup_elf_debug() {
+ ${FEATURES_dedupdebug} || return 0
+ ${PORTAGE_RESTRICT_dedupdebug} && return 0
+
+ debug-print-function "${FUNCNAME}" "$@"
+
+ if ! ${dwz_found} ; then
+ if ! ${dwz_warned} ; then
+ dwz_warned=true
+ ewarn "FEATURES=dedupdebug is enabled but the dwz binary could not be"
+ ewarn "found. This feature will not work unless dwz is installed!"
+ fi
+ return 0
+ fi
+
+ local src=$1 # File to dedup debug symbols
+ local inode_dedupdebug=$2 # Temp path for hard link tracking
+
+ # We already dedupdebug-ed this inode.
+ [[ -L ${inode_dedupdebug} ]] && return 0
+
+ "${dwz}" -- "${src}"
+ touch "${inode_dedupdebug}"
+}
+
# Usage: save_elf_debug <src> <inode_debug> [splitdebug]
save_elf_debug() {
${FEATURES_splitdebug} || return 0
@@ -355,6 +385,7 @@ process_elf() {
xt_data=$(dump_xattrs "${x}")
fi
save_elf_sources "${x}"
+ dedup_elf_debug "${x}" "${inode_link}_dedupdebug"
fi
if ${strip_this} ; then
diff --git a/bin/fixpackages b/bin/fixpackages
index 6f88bea7c..76c8f6d38 100755
--- a/bin/fixpackages
+++ b/bin/fixpackages
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import argparse
@@ -17,44 +17,49 @@ if osp.isfile(
import portage
portage._internal_caller = True
-from portage import os
from portage.output import EOutput
from textwrap import wrap
from portage._global_updates import _global_updates
-mysettings = portage.settings
-mytrees = portage.db
-mtimedb = portage.mtimedb
-description = """The fixpackages program performs package move updates on
- configuration files, installed packages, and binary packages."""
-description = " ".join(description.split())
+def main():
+ mysettings = portage.settings
+ mytrees = portage.db
+ mtimedb = portage.mtimedb
-parser = argparse.ArgumentParser(description=description)
-parser.parse_args()
+ description = """The fixpackages program performs package move updates on
+ configuration files, installed packages, and binary packages."""
+ description = " ".join(description.split())
-if mysettings["ROOT"] != "/":
- out = EOutput()
- msg = (
- "The fixpackages program is not intended for use with "
- + 'ROOT != "/". Instead use `emaint --fix movebin` and/or '
- + "`emaint --fix moveinst."
- )
- for line in wrap(msg, 72):
- out.eerror(line)
- sys.exit(1)
-
-try:
- os.nice(int(mysettings.get("PORTAGE_NICENESS", "0")))
-except (OSError, ValueError) as e:
- portage.writemsg(
- f"!!! Failed to change nice value to '{mysettings['PORTAGE_NICENESS']}'\n"
- )
- portage.writemsg(f"!!! {str(e)}\n")
- del e
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+
+ if mysettings["ROOT"] != "/":
+ out = EOutput()
+ msg = (
+ "The fixpackages program is not intended for use with "
+ + 'ROOT != "/". Instead use `emaint --fix movebin` and/or '
+ + "`emaint --fix moveinst."
+ )
+ for line in wrap(msg, 72):
+ out.eerror(line)
+ sys.exit(1)
+
+ try:
+ os.nice(int(mysettings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ portage.writemsg(
+ f"!!! Failed to change nice value to '{mysettings['PORTAGE_NICENESS']}'\n"
+ )
+ portage.writemsg(f"!!! {str(e)}\n")
+ del e
+
+ _global_updates(mytrees, mtimedb["updates"], if_mtime_changed=False)
+
+ print()
+ print("Done.")
+ print()
-_global_updates(mytrees, mtimedb["updates"], if_mtime_changed=False)
-print()
-print("Done.")
-print()
+if __name__ == "__main__":
+ main()
diff --git a/bin/install-qa-check.d/05prefix b/bin/install-qa-check.d/05prefix
index 8cf372862..229daff6a 100644
--- a/bin/install-qa-check.d/05prefix
+++ b/bin/install-qa-check.d/05prefix
@@ -75,7 +75,7 @@ install_qa_check_prefix() {
echo "${fn#${D}}:${line[0]} (explicit EPREFIX but target not found)" \
>> "${T}"/non-prefix-shebangs-errs
else
- eqawarn "${fn#${D}} has explicit EPREFIX in shebang but target not found (${line[0]})"
+ eqawarn "QA Notice: ${fn#${D}} has explicit EPREFIX in shebang but target not found (${line[0]})"
fi
fi
continue
@@ -83,9 +83,9 @@ install_qa_check_prefix() {
# unprefixed shebang, is the script directly in ${PATH} or an init script?
if [[ ":${PATH}:${EPREFIX}/etc/init.d:" == *":${fp}:"* ]] ; then
if [[ -e ${EROOT}${line[0]} || -e ${ED}${line[0]} ]] ; then
- # is it unprefixed, but we can just fix it because an
- # eprefixed variant exists
- eqawarn "eprefixing shebang of ${fn#${D%/}/}"
+ # is it unprefixed, but we can just fix it because a
+ # prefixed variant exists
+ eqawarn "QA Notice: prefixing shebang of ${fn#${D}}"
# statement is made idempotent on purpose, because
# symlinks may point to the same target, and hence the
# same real file may be sedded multiple times since we
@@ -102,7 +102,7 @@ install_qa_check_prefix() {
# unprefixed/invalid shebang, but outside ${PATH}, this may be
# intended (e.g. config.guess) so remain silent by default
has stricter ${FEATURES} && \
- eqawarn "invalid shebang in ${fn#${D}}: ${line[0]}"
+ eqawarn "QA Notice: invalid shebang in ${fn#${D}}: ${line[0]}"
fi
done
if [[ -e "${T}"/non-prefix-shebangs-errs ]] ; then
diff --git a/bin/install-qa-check.d/60bash-completion b/bin/install-qa-check.d/60bash-completion
index c1547619a..85573df0e 100644
--- a/bin/install-qa-check.d/60bash-completion
+++ b/bin/install-qa-check.d/60bash-completion
@@ -10,7 +10,7 @@ bashcomp_check() {
instcompdir=${ED}/usr/share/bash-completion/completions
elif [[ -d ${ED}/usr/share/bash-completion ]]; then
if [[ ${syscompdir} != ${EPREFIX}/usr/share/bash-completion ]]; then
- eqawarn "Bash completions were installed in legacy location. Please update"
+ eqawarn "QA Notice: Bash completions were installed in legacy location. Please update"
eqawarn "the ebuild to get the install paths using bash-completion-r1.eclass."
eqawarn
fi
@@ -111,7 +111,7 @@ bashcomp_check() {
done
if [[ -n ${qa_warnings[@]} ]]; then
- eqawarn "Problems with installed bash completions were found:"
+ eqawarn "QA Notice: Problems with installed bash completions were found:"
eqawarn
for c in "${qa_warnings[@]}"; do
eqawarn " ${c}"
diff --git a/bin/install-qa-check.d/90bad-bin-group-write b/bin/install-qa-check.d/90bad-bin-group-write
index 786dde712..7b5a0c02d 100644
--- a/bin/install-qa-check.d/90bad-bin-group-write
+++ b/bin/install-qa-check.d/90bad-bin-group-write
@@ -43,7 +43,7 @@ bad_bin_group_write_check() {
done
if [[ ${found[@]} ]]; then
- eqawarn "system executables group-writable by nonzero gid:"
+ eqawarn "QA Notice: system executables group-writable by nonzero gid:"
for f in "${found[@]}"; do
# Strip off the leading destdir before outputting the path.
eqawarn " ${f#${D%/}}"
diff --git a/bin/install-qa-check.d/90bad-bin-owner b/bin/install-qa-check.d/90bad-bin-owner
index 46d4e3947..74255c02f 100644
--- a/bin/install-qa-check.d/90bad-bin-owner
+++ b/bin/install-qa-check.d/90bad-bin-owner
@@ -36,7 +36,7 @@ bad_bin_owner_check() {
done
if [[ ${found[@]} ]]; then
- eqawarn "system executables owned by nonzero uid:"
+ eqawarn "QA Notice: system executables owned by nonzero uid:"
for f in "${found[@]}"; do
# Strip off the leading destdir before outputting the path.
eqawarn " ${f#${D%/}}"
diff --git a/bin/install-qa-check.d/90cmake-warnings b/bin/install-qa-check.d/90cmake-warnings
index a6e901efc..fba1b7ffb 100644
--- a/bin/install-qa-check.d/90cmake-warnings
+++ b/bin/install-qa-check.d/90cmake-warnings
@@ -13,7 +13,7 @@ cmake_warn_check() {
| LC_ALL=C sort -u)
if [[ ${vars} ]]; then
- eqawarn "One or more CMake variables were not used by the project:"
+ eqawarn "QA Notice: One or more CMake variables were not used by the project:"
local v
for v in "${vars[@]}"; do
eqawarn " ${v}"
diff --git a/bin/install-qa-check.d/90world-writable b/bin/install-qa-check.d/90world-writable
index 6a521b3cc..a166c6733 100644
--- a/bin/install-qa-check.d/90world-writable
+++ b/bin/install-qa-check.d/90world-writable
@@ -10,7 +10,7 @@ world_writable_check() {
set -f
if [[ -n ${unsafe_files} ]] ; then
- eqawarn "QA Security Notice: world writable file(s):"
+ eqawarn "QA Notice: world writable file(s):"
eqatag -v world-writable ${unsafe_files}
diff --git a/bin/install-qa-check.d/95empty-dirs b/bin/install-qa-check.d/95empty-dirs
index b8612bdf1..95c121817 100644
--- a/bin/install-qa-check.d/95empty-dirs
+++ b/bin/install-qa-check.d/95empty-dirs
@@ -26,7 +26,7 @@ find_empty_dirs() {
done < <(find "${ED}" -depth -mindepth 1 -type d -empty -print0 ${striparg} | LC_COLLATE=C sort -z)
if [[ ${warn_dirs[@]} ]]; then
- eqawarn "One or more empty directories installed to /var:"
+ eqawarn "QA Notice: One or more empty directories installed to /var:"
eqawarn
for d in "${warn_dirs[@]}"; do
eqawarn " ${d#${ED%/}}"
diff --git a/bin/phase-functions.sh b/bin/phase-functions.sh
index 7a463df7f..31b02cedf 100644
--- a/bin/phase-functions.sh
+++ b/bin/phase-functions.sh
@@ -504,7 +504,8 @@ __dyn_test() {
fi
if has test ${PORTAGE_RESTRICT} && ! has all ${ALLOW_TEST} &&
- ! { has test_network ${PORTAGE_PROPERTIES} && has network ${ALLOW_TEST}; }
+ ! { has test_network ${PORTAGE_PROPERTIES} && has network ${ALLOW_TEST}; } &&
+ ! { has test_privileged ${PORTAGE_PROPERTIES} && has privileged ${ALLOW_TEST}; }
then
einfo "Skipping make test/check due to ebuild restriction."
__vecho ">>> Test phase [disabled because of RESTRICT=test]: ${CATEGORY}/${PF}"
diff --git a/bin/phase-helpers.sh b/bin/phase-helpers.sh
index ec634e536..45a1639c4 100644
--- a/bin/phase-helpers.sh
+++ b/bin/phase-helpers.sh
@@ -907,7 +907,11 @@ __eapi8_src_prepare() {
___best_version_and_has_version_common() {
local atom root root_arg
- local -a cmd=()
+
+ # If ROOT is set to / below then SYSROOT cannot point elsewhere. Even if
+ # ROOT is untouched, setting SYSROOT=/ for this command will always work.
+ local -a cmd=(env SYSROOT=/)
+
case $1 in
--host-root|-r|-d|-b)
root_arg=$1
@@ -932,7 +936,7 @@ ___best_version_and_has_version_common() {
# Since portageq requires the root argument be consistent
# with EPREFIX, ensure consistency here (bug #655414).
root=/${PORTAGE_OVERRIDE_EPREFIX#/}
- cmd+=(env EPREFIX="${PORTAGE_OVERRIDE_EPREFIX}")
+ cmd+=(EPREFIX="${PORTAGE_OVERRIDE_EPREFIX}")
else
root=/
fi ;;
@@ -948,7 +952,7 @@ ___best_version_and_has_version_common() {
# Use /${PORTAGE_OVERRIDE_EPREFIX#/} to support older
# EAPIs, as it is equivalent to BROOT.
root=/${PORTAGE_OVERRIDE_EPREFIX#/}
- cmd+=(env EPREFIX="${PORTAGE_OVERRIDE_EPREFIX}")
+ cmd+=(EPREFIX="${PORTAGE_OVERRIDE_EPREFIX}")
;;
esac
else
diff --git a/bin/quickpkg b/bin/quickpkg
index 8443a00e6..c688c5312 100755
--- a/bin/quickpkg
+++ b/bin/quickpkg
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 1999-2021 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import argparse
@@ -341,10 +341,6 @@ def quickpkg_main(options, args, eout):
portage.settings.features.remove("xattr")
portage.settings.lock()
- if portage.settings.get("BINPKG_GPG_SIGNING_KEY", None):
- gpg = GPG(portage.settings)
- gpg.unlock()
-
infos = {}
infos["successes"] = []
infos["missing"] = []
@@ -444,11 +440,18 @@ if __name__ == "__main__":
def sigwinch_handler(signum, frame):
lines, eout.term_columns = portage.output.get_term_size()
+ gpg = None
+ if portage.settings.get("BINPKG_GPG_SIGNING_KEY", None):
+ gpg = GPG(portage.settings)
+ gpg.unlock()
+
signal.signal(signal.SIGWINCH, sigwinch_handler)
try:
retval = quickpkg_main(options, args, eout)
finally:
os.umask(old_umask)
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+ if gpg is not None:
+ gpg.stop()
global_event_loop().close()
sys.exit(retval)
diff --git a/cnf/make.conf.example.arc.diff b/cnf/make.conf.example.arc.diff
new file mode 100644
index 000000000..4775cb363
--- /dev/null
+++ b/cnf/make.conf.example.arc.diff
@@ -0,0 +1,46 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,15 @@
+ # Example:
+ #USE="X gtk gnome -alsa"
+
++# Host Setting
++# ============
++#
++# 32-bit ARC systems should use this host setting:
++CHOST="arc-snps-linux-gnu"
++
++# 64-bit ARCv3 and above systems may use this host setting:
++#CHOST="arc64-snps-linux-gnu"
++
+ # Host and optimization settings
+ # ==============================
+ #
+@@ -39,7 +48,17 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
+ #
+-# Please refer to the GCC manual for a list of possible values.
++# -mcpu=<cpu-type> compiles code that runs exclusively on a particular CPU.
++#
++# -mtune=<cpu-series> compiles code that is optimized for a particular CPU, but
++# will run on the CPU specified in -mcpu, or all ARC CPUs if -mcpu= is not
++# specified.
++#
++# -mfpu=<fpu> specifies the capabilities of the FPU to generate floating-point
++# instructions for.
++#
++# Please refer to the "ARC Options" section of the GCC manual for a list of
++# possible CPU and FPU values.
+ #
+ #CFLAGS="-O2 -pipe"
+ #
+@@ -76,7 +95,7 @@
+ # DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
+ # IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
+ #
+-#ACCEPT_KEYWORDS="~arch"
++#ACCEPT_KEYWORDS="~arc"
+
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.globals b/cnf/make.globals
index 116b76cd7..a5931a177 100644
--- a/cnf/make.globals
+++ b/cnf/make.globals
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# System-wide defaults for the Portage system
@@ -38,9 +38,9 @@ PORTAGE_TMPDIR="/var/tmp"
# existing installs where bzip2 is used for backward compatibility.
BINPKG_COMPRESS="zstd"
-# The format used for binary packages. The default is use old "xpak" format.
-# Set to "gpkg" to use new gentoo binary package format.
-BINPKG_FORMAT="xpak"
+# The format used for binary packages. The default is to use the new "gpkg" format.
+# Set to "xpak" to use the old gentoo binary package format.
+BINPKG_FORMAT="gpkg"
# The binary package default GPG signing command.
# flock is used to avoid a racing condition of gnupg
@@ -77,7 +77,7 @@ FETCHCOMMAND_SFTP="bash -c \"x=\\\${2#sftp://} ; host=\\\${x%%/*} ; port=\\\${ho
FEATURES="assume-digests binpkg-docompress binpkg-dostrip binpkg-logs
binpkg-multi-instance buildpkg-live
config-protect-if-modified distlocks ebuild-locks
- fixlafiles ipc-sandbox merge-sync multilib-strict
+ fixlafiles ipc-sandbox merge-sync merge-wait multilib-strict
network-sandbox news parallel-fetch pkgdir-index-trusted pid-sandbox
preserve-libs protect-owned qa-unresolved-soname-deps
sandbox sfperms strict
diff --git a/lib/_emerge/AbstractDepPriority.py b/lib/_emerge/AbstractDepPriority.py
index a9616c109..3af262cd7 100644
--- a/lib/_emerge/AbstractDepPriority.py
+++ b/lib/_emerge/AbstractDepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2013 Gentoo Foundation
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import copy
@@ -9,6 +9,7 @@ class AbstractDepPriority(SlotObject):
__slots__ = (
"buildtime",
"buildtime_slot_op",
+ "installtime",
"runtime",
"runtime_post",
"runtime_slot_op",
diff --git a/lib/_emerge/AsynchronousTask.py b/lib/_emerge/AsynchronousTask.py
index 4290eede3..4049ba5eb 100644
--- a/lib/_emerge/AsynchronousTask.py
+++ b/lib/_emerge/AsynchronousTask.py
@@ -46,9 +46,9 @@ class AsynchronousTask(SlotObject):
)
self.addExitListener(exit_listener)
waiter.add_done_callback(
- lambda waiter: self.removeExitListener(exit_listener)
- if waiter.cancelled()
- else None
+ lambda waiter: (
+ self.removeExitListener(exit_listener) if waiter.cancelled() else None
+ )
)
if self.returncode is not None:
# If the returncode is not None, it means the exit event has already
diff --git a/lib/_emerge/Binpkg.py b/lib/_emerge/Binpkg.py
index 9b1036538..299ae7fbc 100644
--- a/lib/_emerge/Binpkg.py
+++ b/lib/_emerge/Binpkg.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2023 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import io
+import sys
import functools
import _emerge.emergelog
from _emerge.EbuildPhase import EbuildPhase
@@ -244,12 +246,36 @@ class Binpkg(CompositeTask):
pkg_count = self.pkg_count
if self._fetched_pkg:
- pkg_path = self._bintree.getname(
- self._bintree.inject(
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ out = io.StringIO()
+ try:
+ sys.stdout = out
+ sys.stderr = out
+
+ injected_pkg = self._bintree.inject(
pkg.cpv,
current_pkg_path=self._fetched_pkg,
allocated_pkg_path=self._pkg_allocated_path,
- ),
+ )
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+
+ output_value = out.getvalue()
+ if output_value:
+ self.scheduler.output(
+ output_value,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"),
+ background=self.background,
+ )
+
+ if injected_pkg is None:
+ self._async_unlock_builddir(returncode=1)
+ return
+
+ pkg_path = self._bintree.getname(
+ injected_pkg,
allocate_new=False,
)
else:
diff --git a/lib/_emerge/BinpkgFetcher.py b/lib/_emerge/BinpkgFetcher.py
index 10f9b6e42..a1524dc00 100644
--- a/lib/_emerge/BinpkgFetcher.py
+++ b/lib/_emerge/BinpkgFetcher.py
@@ -1,8 +1,6 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-import functools
-
from _emerge.AsynchronousLock import AsynchronousLock
from _emerge.CompositeTask import CompositeTask
from _emerge.SpawnProcess import SpawnProcess
@@ -14,6 +12,7 @@ from portage import os
from portage.binpkg import get_binpkg_format
from portage.exception import FileNotFound
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._async.FileCopier import FileCopier
from portage.util._pty import _create_pty_or_pipe
@@ -40,6 +39,22 @@ class BinpkgFetcher(CompositeTask):
self.pkg_path = self.pkg_allocated_path + ".partial"
def _start(self):
+ self._start_task(
+ AsyncTaskFuture(future=self._main(), scheduler=self.scheduler),
+ self._main_exit,
+ )
+
+ async def _main(self) -> int:
+ """
+ Main coroutine which saves the binary package to self.pkg_path
+ and returns the exit status of the fetcher or copier.
+
+ @rtype: int
+ @return: Exit status of fetcher or copier.
+ """
+ pkg = self.pkg
+ bintree = pkg.root_config.trees["bintree"]
+
fetcher = _BinpkgFetcherProcess(
background=self.background,
logfile=self.logfile,
@@ -52,47 +67,68 @@ class BinpkgFetcher(CompositeTask):
if not self.pretend:
portage.util.ensure_dirs(os.path.dirname(self.pkg_path))
if "distlocks" in self.pkg.root_config.settings.features:
- self._start_task(
- AsyncTaskFuture(future=fetcher.async_lock()),
- functools.partial(self._start_locked, fetcher),
- )
- return
-
- self._start_task(fetcher, self._fetcher_exit)
-
- def _start_locked(self, fetcher, lock_task):
- self._assert_current(lock_task)
- if lock_task.cancelled:
- self._default_final_exit(lock_task)
- return
-
- lock_task.future.result()
- self._start_task(fetcher, self._fetcher_exit)
-
- def _fetcher_exit(self, fetcher):
- self._assert_current(fetcher)
- if not self.pretend and fetcher.returncode == os.EX_OK:
- fetcher.sync_timestamp()
- if fetcher.locked:
- self._start_task(
- AsyncTaskFuture(future=fetcher.async_unlock()),
- functools.partial(self._fetcher_exit_unlocked, fetcher),
- )
- else:
- self._fetcher_exit_unlocked(fetcher)
-
- def _fetcher_exit_unlocked(self, fetcher, unlock_task=None):
- if unlock_task is not None:
- self._assert_current(unlock_task)
- if unlock_task.cancelled:
- self._default_final_exit(unlock_task)
- return
+ await fetcher.async_lock()
+
+ try:
+ if bintree._remote_has_index:
+ remote_metadata = bintree._remotepkgs[
+ bintree.dbapi._instance_key(pkg.cpv)
+ ]
+ rel_uri = remote_metadata.get("PATH")
+ if not rel_uri:
+ # Assume that the remote index is out of date. A missing path
+ # should never happen with new portage versions.
+ rel_uri = pkg.cpv + ".tbz2"
+ remote_base_uri = remote_metadata["BASE_URI"]
+ uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+ else:
+ raise FileNotFound("Binary packages index not found")
- unlock_task.future.result()
+ uri_parsed = urllib_parse_urlparse(uri)
- self._current_task = None
- self.returncode = fetcher.returncode
- self._async_wait()
+ copier = None
+ if not self.pretend and uri_parsed.scheme in ("", "file"):
+ copier = FileCopier(
+ src_path=uri_parsed.path,
+ dest_path=self.pkg_path,
+ scheduler=self.scheduler,
+ )
+ copier.start()
+ try:
+ await copier.async_wait()
+ copier.future.result()
+ except FileNotFoundError:
+ await self.scheduler.async_output(
+ f"!!! File not found: {uri_parsed.path}\n",
+ log_file=self.logfile,
+ background=self.background,
+ )
+ finally:
+ if copier.isAlive():
+ copier.cancel()
+ if copier.returncode == os.EX_OK:
+ fetcher.sync_timestamp()
+ else:
+ fetcher.start()
+ try:
+ await fetcher.async_wait()
+ finally:
+ if fetcher.isAlive():
+ fetcher.cancel()
+
+ if not self.pretend and fetcher.returncode == os.EX_OK:
+ fetcher.sync_timestamp()
+ finally:
+ if fetcher.locked:
+ await fetcher.async_unlock()
+
+ return fetcher.returncode if copier is None else copier.returncode
+
+ def _main_exit(self, main_task):
+ if not main_task.cancelled:
+ # Use the fetcher or copier returncode.
+ main_task.returncode = main_task.future.result()
+ self._default_final_exit(main_task)
class _BinpkgFetcherProcess(SpawnProcess):
diff --git a/lib/_emerge/BinpkgPrefetcher.py b/lib/_emerge/BinpkgPrefetcher.py
index 37dbe0a40..ed68d2852 100644
--- a/lib/_emerge/BinpkgPrefetcher.py
+++ b/lib/_emerge/BinpkgPrefetcher.py
@@ -1,6 +1,9 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import io
+import sys
+
from _emerge.BinpkgFetcher import BinpkgFetcher
from _emerge.CompositeTask import CompositeTask
from _emerge.BinpkgVerifier import BinpkgVerifier
@@ -45,12 +48,31 @@ class BinpkgPrefetcher(CompositeTask):
self.wait()
return
- self._bintree.inject(
- self.pkg.cpv,
- current_pkg_path=self.pkg_path,
- allocated_pkg_path=self.pkg_allocated_path,
- )
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ out = io.StringIO()
+ try:
+ sys.stdout = out
+ sys.stderr = out
+
+ injected_pkg = self._bintree.inject(
+ self.pkg.cpv,
+ current_pkg_path=self.pkg_path,
+ allocated_pkg_path=self.pkg_allocated_path,
+ )
+
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+
+ output_value = out.getvalue()
+ if output_value:
+ self.scheduler.output(
+ output_value,
+ log_path=self.scheduler.fetch.log_file,
+ background=self.background,
+ )
self._current_task = None
- self.returncode = os.EX_OK
+ self.returncode = 1 if injected_pkg is None else os.EX_OK
self.wait()
diff --git a/lib/_emerge/BinpkgVerifier.py b/lib/_emerge/BinpkgVerifier.py
index a7917453a..7e044c6c4 100644
--- a/lib/_emerge/BinpkgVerifier.py
+++ b/lib/_emerge/BinpkgVerifier.py
@@ -41,11 +41,23 @@ class BinpkgVerifier(CompositeTask):
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
raise
- self.scheduler.output(
- f"!!! Fetching Binary failed for '{self.pkg.cpv}'\n",
- log_path=self.logfile,
- background=self.background,
- )
+
+ # We might end up here with FEATURES="pkgdir-index-trusted" if
+ # binpkgs have been removed manually without refreshing the index.
+ if bintree.dbapi.cpv_exists(self.pkg.cpv):
+ self.scheduler.output(
+ f"!!! Tried to use non-existent binary for '{self.pkg.cpv}'\n"
+ + f"!!! Likely caused by an outdated index. Run 'emaint binhost -f'.\n",
+ log_path=self.logfile,
+ background=self.background,
+ )
+ else:
+ self.scheduler.output(
+ f"!!! Fetching Binary failed for '{self.pkg.cpv}'\n",
+ log_path=self.logfile,
+ background=self.background,
+ )
+
self.returncode = 1
self._async_wait()
return
diff --git a/lib/_emerge/DepPriority.py b/lib/_emerge/DepPriority.py
index 99d38477e..8d282b937 100644
--- a/lib/_emerge/DepPriority.py
+++ b/lib/_emerge/DepPriority.py
@@ -1,11 +1,11 @@
-# Copyright 1999-2013 Gentoo Foundation
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
class DepPriority(AbstractDepPriority):
- __slots__ = ("satisfied", "optional", "ignored")
+ __slots__ = ("cross", "ignored", "optional", "satisfied")
def __int__(self):
"""
diff --git a/lib/_emerge/DepPriorityNormalRange.py b/lib/_emerge/DepPriorityNormalRange.py
index d7e4381b4..cb0e6c26b 100644
--- a/lib/_emerge/DepPriorityNormalRange.py
+++ b/lib/_emerge/DepPriorityNormalRange.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.DepPriority import DepPriority
@@ -41,7 +41,7 @@ class DepPriorityNormalRange:
# to adjust this appropriately. But only build time dependencies
# are optional right now, so it's not an issue as-is.
return bool(
- not priority.runtime_slot_op
+ not (priority.runtime_slot_op and not priority.cross)
and (priority.optional or not priority.buildtime)
)
diff --git a/lib/_emerge/DepPrioritySatisfiedRange.py b/lib/_emerge/DepPrioritySatisfiedRange.py
index 0d42e7613..b3bc90c2f 100644
--- a/lib/_emerge/DepPrioritySatisfiedRange.py
+++ b/lib/_emerge/DepPrioritySatisfiedRange.py
@@ -96,6 +96,7 @@ class DepPrioritySatisfiedRange:
(
(not priority.runtime_slot_op)
or (priority.satisfied and priority.runtime_slot_op)
+ or priority.cross
)
and (priority.satisfied or priority.optional or not priority.buildtime)
)
diff --git a/lib/_emerge/EbuildBinpkg.py b/lib/_emerge/EbuildBinpkg.py
index cb01f73d7..97b69ae01 100644
--- a/lib/_emerge/EbuildBinpkg.py
+++ b/lib/_emerge/EbuildBinpkg.py
@@ -1,6 +1,9 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import io
+import sys
+
from _emerge.CompositeTask import CompositeTask
from _emerge.EbuildPhase import EbuildPhase
@@ -55,14 +58,34 @@ class EbuildBinpkg(CompositeTask):
pkg = self.pkg
bintree = pkg.root_config.trees["bintree"]
- self._binpkg_info = bintree.inject(
- pkg.cpv,
- current_pkg_path=self._binpkg_tmpfile,
- allocated_pkg_path=self.pkg_allocated_path,
- )
+
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ out = io.StringIO()
+ try:
+ sys.stdout = out
+ sys.stderr = out
+
+ self._binpkg_info = bintree.inject(
+ pkg.cpv,
+ current_pkg_path=self._binpkg_tmpfile,
+ allocated_pkg_path=self.pkg_allocated_path,
+ )
+
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+
+ output_value = out.getvalue()
+ if output_value:
+ self.scheduler.output(
+ output_value,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"),
+ background=self.background,
+ )
self._current_task = None
- self.returncode = os.EX_OK
+ self.returncode = 1 if self._binpkg_info is None else os.EX_OK
self.wait()
def get_binpkg_info(self):
diff --git a/lib/_emerge/EbuildBuild.py b/lib/_emerge/EbuildBuild.py
index 81cbfdc08..d4a4c6dac 100644
--- a/lib/_emerge/EbuildBuild.py
+++ b/lib/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -22,6 +22,7 @@ from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.package.ebuild._spawn_nofetch import SpawnNofetchWithoutBuilddir
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util.futures.executor.fork import ForkExecutor
from portage.util.path import first_existing
@@ -152,29 +153,25 @@ class EbuildBuild(CompositeTask):
if opts.fetchonly:
if opts.pretend:
fetcher = EbuildFetchonly(
+ ebuild_path=self._ebuild_path,
fetch_all=opts.fetch_all_uri,
pkg=pkg,
pretend=opts.pretend,
settings=settings,
)
- retval = fetcher.execute()
- if retval == os.EX_OK:
- self._current_task = None
- self.returncode = os.EX_OK
- self._async_wait()
- else:
- # For pretend mode, the convention it to execute
- # pkg_nofetch and return a successful exitcode.
- self._start_task(
- SpawnNofetchWithoutBuilddir(
- background=self.background,
- portdb=self.pkg.root_config.trees[self._tree].dbapi,
- ebuild_path=self._ebuild_path,
- scheduler=self.scheduler,
- settings=self.settings,
+ # Execute EbuildFetchonly in a subprocess since it needs to
+ # run the event loop itself (even for pretend mode since it
+ # may need to fetch mirror layouts as reported in bug 702154).
+ self._start_task(
+ AsyncTaskFuture(
+ background=self.background,
+ scheduler=self.scheduler,
+ future=self.scheduler.run_in_executor(
+ ForkExecutor(loop=self.scheduler), fetcher.execute
),
- self._default_final_exit,
- )
+ ),
+ self._fetchonly_exit,
+ )
return
quiet_setting = settings.get("PORTAGE_QUIET", False)
@@ -241,8 +238,12 @@ class EbuildBuild(CompositeTask):
self._start_task(pre_clean_phase, self._pre_clean_exit)
def _fetchonly_exit(self, fetcher):
+ if not fetcher.cancelled and isinstance(fetcher, AsyncTaskFuture):
+ # Set returncode from EbuildFetchonly.execute() result, since
+ # it can fail if it can't resolve a mirror for a file.
+ fetcher.returncode = fetcher.future.result()
self._final_exit(fetcher)
- if self.returncode != os.EX_OK:
+ if not self.cancelled and self.returncode != os.EX_OK:
self.returncode = None
portdb = self.pkg.root_config.trees[self._tree].dbapi
self._start_task(
diff --git a/lib/_emerge/EbuildFetchonly.py b/lib/_emerge/EbuildFetchonly.py
index e887dd858..c806122be 100644
--- a/lib/_emerge/EbuildFetchonly.py
+++ b/lib/_emerge/EbuildFetchonly.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -8,20 +8,16 @@ from portage.util.SlotObject import SlotObject
class EbuildFetchonly(SlotObject):
- __slots__ = ("fetch_all", "pkg", "pretend", "settings")
+ __slots__ = ("ebuild_path", "fetch_all", "pkg", "pretend", "settings")
def execute(self):
settings = self.settings
pkg = self.pkg
portdb = pkg.root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
- if ebuild_path is None:
- raise AssertionError(f"ebuild not found for '{pkg.cpv}'")
- settings.setcpv(pkg)
debug = settings.get("PORTAGE_DEBUG") == "1"
rval = portage.doebuild(
- ebuild_path,
+ self.ebuild_path,
"fetch",
settings=settings,
debug=debug,
diff --git a/lib/_emerge/EbuildMetadataPhase.py b/lib/_emerge/EbuildMetadataPhase.py
index fd695e025..54177840c 100644
--- a/lib/_emerge/EbuildMetadataPhase.py
+++ b/lib/_emerge/EbuildMetadataPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
@@ -8,18 +8,19 @@ import portage
portage.proxy.lazyimport.lazyimport(
globals(),
+ "_emerge.EbuildPhase:_setup_locale",
"portage.package.ebuild._metadata_invalid:eapi_invalid",
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+from portage.util.futures import asyncio
import fcntl
class EbuildMetadataPhase(SubProcess):
-
"""
Asynchronous interface for the ebuild "depend" phase which is
used to extract metadata from the ebuild.
@@ -34,6 +35,7 @@ class EbuildMetadataPhase(SubProcess):
"portdb",
"repo_path",
"settings",
+ "deallocate_config",
"write_auxdb",
) + (
"_eapi",
@@ -45,6 +47,12 @@ class EbuildMetadataPhase(SubProcess):
_files_dict = slot_dict_class(_file_names, prefix="")
def _start(self):
+ asyncio.ensure_future(
+ self._async_start(), loop=self.scheduler
+ ).add_done_callback(self._async_start_done)
+ self._registered = True
+
+ async def _async_start(self):
ebuild_path = self.ebuild_hash.location
with open(
@@ -76,6 +84,9 @@ class EbuildMetadataPhase(SubProcess):
settings.setcpv(self.cpv)
settings.configdict["pkg"]["EAPI"] = parsed_eapi
+ # This requires above setcpv and EAPI setup.
+ await _setup_locale(self.settings)
+
debug = settings.get("PORTAGE_DEBUG") == "1"
master_fd = None
slave_fd = None
@@ -115,7 +126,6 @@ class EbuildMetadataPhase(SubProcess):
self._raw_metadata = []
files.ebuild = master_fd
self.scheduler.add_reader(files.ebuild, self._output_handler)
- self._registered = True
retval = portage.doebuild(
ebuild_path,
@@ -125,9 +135,18 @@ class EbuildMetadataPhase(SubProcess):
mydbapi=self.portdb,
tree="porttree",
fd_pipes=fd_pipes,
- returnpid=True,
+ returnproc=True,
)
settings.pop("PORTAGE_PIPE_FD", None)
+ # At this point we can return settings to the caller
+ # since we never use it for anything more than an
+ # eapi_invalid call after this, and eapi_invalid is
+ # insensitive to concurrent modifications.
+ if (
+ self.deallocate_config is not None
+ and not self.deallocate_config.cancelled()
+ ):
+ self.deallocate_config.set_result(settings)
os.close(slave_fd)
null_input.close()
@@ -138,7 +157,20 @@ class EbuildMetadataPhase(SubProcess):
self._async_wait()
return
- self.pid = retval[0]
+ self._proc = retval
+
+ def _async_start_done(self, future):
+ future.cancelled() or future.result()
+ if not self._was_cancelled() and future.cancelled():
+ self.cancel()
+ self._was_cancelled()
+
+ if self.deallocate_config is not None and not self.deallocate_config.done():
+ self.deallocate_config.set_result(self.settings)
+
+ if self.returncode is not None:
+ self._unregister()
+ self.wait()
def _output_handler(self):
while True:
@@ -200,12 +232,10 @@ class EbuildMetadataPhase(SubProcess):
# entries for unsupported EAPIs.
if self.eapi_supported:
if metadata.get("INHERITED", False):
- metadata[
- "_eclasses_"
- ] = self.portdb.repositories.get_repo_for_location(
- self.repo_path
- ).eclass_db.get_eclass_data(
- metadata["INHERITED"].split()
+ metadata["_eclasses_"] = (
+ self.portdb.repositories.get_repo_for_location(
+ self.repo_path
+ ).eclass_db.get_eclass_data(metadata["INHERITED"].split())
)
else:
metadata["_eclasses_"] = {}
diff --git a/lib/_emerge/EbuildPhase.py b/lib/_emerge/EbuildPhase.py
index c309b4c94..9dbd79fd1 100644
--- a/lib/_emerge/EbuildPhase.py
+++ b/lib/_emerge/EbuildPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2021 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -24,6 +24,7 @@ from portage.package.ebuild.prepare_build_dirs import (
_prepare_fake_distdir,
_prepare_fake_filesdir,
)
+from portage.eapi import _get_eapi_attrs
from portage.util import writemsg, ensure_dirs
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
from portage.util._async.BuildLogger import BuildLogger
@@ -56,12 +57,34 @@ portage.proxy.lazyimport.lazyimport(
+ "_post_src_install_write_metadata,"
+ "_preinst_bsdflags",
"portage.util.futures.unix_events:_set_nonblocking",
+ "portage.util.locale:async_check_locale,split_LC_ALL",
)
from portage import os
from portage import _encodings
from portage import _unicode_encode
+async def _setup_locale(settings):
+ eapi_attrs = _get_eapi_attrs(settings["EAPI"])
+ if eapi_attrs.posixish_locale:
+ split_LC_ALL(settings)
+ settings["LC_COLLATE"] = "C"
+ # check_locale() returns None when check can not be executed.
+ if await async_check_locale(silent=True, env=settings.environ()) is False:
+ # try another locale
+ for l in ("C.UTF-8", "en_US.UTF-8", "en_GB.UTF-8", "C"):
+ settings["LC_CTYPE"] = l
+ if await async_check_locale(silent=True, env=settings.environ()):
+ # TODO: output the following only once
+ # writemsg(
+ # _("!!! LC_CTYPE unsupported, using %s instead\n")
+ # % self.settings["LC_CTYPE"]
+ # )
+ break
+ else:
+ raise AssertionError("C locale did not pass the test!")
+
+
class EbuildPhase(CompositeTask):
__slots__ = ("actionmap", "fd_pipes", "phase", "settings") + ("_ebuild_lock",)
@@ -69,6 +92,7 @@ class EbuildPhase(CompositeTask):
_features_display = (
"ccache",
"compressdebug",
+ "dedupdebug",
"distcc",
"fakeroot",
"installsources",
@@ -96,6 +120,9 @@ class EbuildPhase(CompositeTask):
self._start_task(AsyncTaskFuture(future=future), self._async_start_exit)
async def _async_start(self):
+
+ await _setup_locale(self.settings)
+
need_builddir = self.phase not in EbuildProcess._phases_without_builddir
if need_builddir:
diff --git a/lib/_emerge/MergeListItem.py b/lib/_emerge/MergeListItem.py
index efe485c2e..ae894704a 100644
--- a/lib/_emerge/MergeListItem.py
+++ b/lib/_emerge/MergeListItem.py
@@ -13,7 +13,6 @@ from _emerge.PackageUninstall import PackageUninstall
class MergeListItem(CompositeTask):
-
"""
TODO: For parallel scheduling, everything here needs asynchronous
execution support (start, poll, and wait methods).
diff --git a/lib/_emerge/MetadataRegen.py b/lib/_emerge/MetadataRegen.py
index d29722b94..538a94b45 100644
--- a/lib/_emerge/MetadataRegen.py
+++ b/lib/_emerge/MetadataRegen.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
@@ -44,6 +44,7 @@ class MetadataRegen(AsyncScheduler):
valid_pkgs = self._valid_pkgs
cp_set = self._cp_set
consumer = self._consumer
+ config_pool = []
portage.writemsg_stdout("Regenerating cache entries...\n")
for cp in self._cp_iter:
@@ -73,12 +74,23 @@ class MetadataRegen(AsyncScheduler):
consumer(cpv, repo_path, metadata, ebuild_hash, True)
continue
+ if config_pool:
+ settings = config_pool.pop()
+ else:
+ settings = portage.config(clone=portdb.settings)
+
+ deallocate_config = self.scheduler.create_future()
+ deallocate_config.add_done_callback(
+ lambda future: config_pool.append(future.result())
+ )
+
yield EbuildMetadataPhase(
cpv=cpv,
ebuild_hash=ebuild_hash,
portdb=portdb,
repo_path=repo_path,
- settings=portdb.doebuild_settings,
+ settings=settings,
+ deallocate_config=deallocate_config,
write_auxdb=self._write_auxdb,
)
diff --git a/lib/_emerge/PipeReader.py b/lib/_emerge/PipeReader.py
index 026346e0b..76ab7f188 100644
--- a/lib/_emerge/PipeReader.py
+++ b/lib/_emerge/PipeReader.py
@@ -8,7 +8,6 @@ from _emerge.AbstractPollTask import AbstractPollTask
class PipeReader(AbstractPollTask):
-
"""
Reads output from one or more files and saves it in memory,
for retrieval via the getvalue() method. This is driven by
diff --git a/lib/_emerge/Scheduler.py b/lib/_emerge/Scheduler.py
index 66eca4c65..9950792dc 100644
--- a/lib/_emerge/Scheduler.py
+++ b/lib/_emerge/Scheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2023 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from collections import deque
@@ -930,6 +930,7 @@ class Scheduler(PollScheduler):
current_task = clean_phase
clean_phase.start()
await clean_phase.async_wait()
+ current_task = None
if x.built:
tree = "bintree"
@@ -981,16 +982,34 @@ class Scheduler(PollScheduler):
self._record_pkg_failure(x, settings, verifier.returncode)
continue
+ current_task = None
if fetched:
- bintree.inject(
+ if not bintree.inject(
x.cpv,
current_pkg_path=fetched,
allocated_pkg_path=fetcher.pkg_allocated_path,
- )
+ ):
+ eerror(
+ "Binary package is not usable",
+ phase="pretend",
+ key=x.cpv,
+ )
+ failures += 1
+ self._record_pkg_failure(x, settings, 1)
+ continue
infloc = os.path.join(build_dir_path, "build-info")
ensure_dirs(infloc)
- await bintree.dbapi.unpack_metadata(settings, infloc, loop=loop)
+ try:
+ await bintree.dbapi.unpack_metadata(settings, infloc, loop=loop)
+ except portage.exception.SignatureException as e:
+ writemsg(
+ f"!!! Invalid binary package: '{bintree.getname(x.cpv)}', {e}\n",
+ noiselevel=-1,
+ )
+ failures += 1
+ self._record_pkg_failure(x, settings, 1)
+ continue
ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
@@ -1030,23 +1049,27 @@ class Scheduler(PollScheduler):
current_task = pretend_phase
pretend_phase.start()
ret = await pretend_phase.async_wait()
+ # Leave current_task assigned in order to trigger clean
+ # on success in the below finally block.
if ret != os.EX_OK:
failures += 1
self._record_pkg_failure(x, settings, ret)
- portage.elog.elog_process(x.cpv, settings)
finally:
if current_task is not None:
if current_task.isAlive():
current_task.cancel()
- if current_task.returncode == os.EX_OK:
- clean_phase = EbuildPhase(
- background=False,
- phase="clean",
- scheduler=sched_iface,
- settings=settings,
- )
- clean_phase.start()
- await clean_phase.async_wait()
+
+ portage.elog.elog_process(x.cpv, settings)
+
+ if current_task is not None and current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(
+ background=False,
+ phase="clean",
+ scheduler=sched_iface,
+ settings=settings,
+ )
+ clean_phase.start()
+ await clean_phase.async_wait()
await build_dir.async_unlock()
self._deallocate_config(settings)
@@ -1498,14 +1521,16 @@ class Scheduler(PollScheduler):
self.curval += 1
merge = PackageMerge(merge=build, scheduler=self._sched_iface)
self._running_tasks[id(merge)] = merge
- if (
- not build.build_opts.buildpkgonly
- and build.pkg in self._deep_system_deps
+ # By default, merge-wait only allows merge when no builds are executing.
+ # As a special exception, dependencies on system packages are frequently
+ # unspecified and will therefore force merge-wait.
+ is_system_pkg = build.pkg in self._deep_system_deps
+ if not build.build_opts.buildpkgonly and (
+ "merge-wait" in build.settings.features or is_system_pkg
):
- # Since dependencies on system packages are frequently
- # unspecified, merge them only when no builds are executing.
self._merge_wait_queue.append(merge)
- merge.addStartListener(self._system_merge_started)
+ if is_system_pkg:
+ merge.addStartListener(self._system_merge_started)
else:
self._task_queues.merge.add(merge)
merge.addExitListener(self._merge_exit)
diff --git a/lib/_emerge/SpawnProcess.py b/lib/_emerge/SpawnProcess.py
index 72fa72c61..b63afae01 100644
--- a/lib/_emerge/SpawnProcess.py
+++ b/lib/_emerge/SpawnProcess.py
@@ -1,4 +1,4 @@
-# Copyright 2008-2023 Gentoo Authors
+# Copyright 2008-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -16,7 +16,6 @@ from portage.util.futures import asyncio
class SpawnProcess(SubProcess):
-
"""
Constructor keyword args are passed into portage.process.spawn().
The required "args" keyword argument will be passed as the first
@@ -42,7 +41,7 @@ class SpawnProcess(SubProcess):
)
__slots__ = (
- ("args", "log_filter_file")
+ ("args", "create_pipe", "log_filter_file")
+ _spawn_kwarg_names
+ (
"_main_task",
@@ -61,15 +60,30 @@ class SpawnProcess(SubProcess):
else:
self.fd_pipes = self.fd_pipes.copy()
fd_pipes = self.fd_pipes
+ log_file_path = None
if fd_pipes or self.logfile or not self.background:
- master_fd, slave_fd = self._pipe(fd_pipes)
+ if self.create_pipe is not False:
+ master_fd, slave_fd = self._pipe(fd_pipes)
- can_log = self._can_log(slave_fd)
- if can_log:
- log_file_path = self.logfile
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
else:
- log_file_path = None
+ if self.logfile:
+ raise NotImplementedError(
+ "logfile conflicts with create_pipe=False"
+ )
+ # When called via process.spawn and ForkProcess._start,
+ # SpawnProcess will have created a pipe earlier, so it
+ # would be redundant to do it here (it could also trigger
+ # spawn recursion via set_term_size as in bug 923750).
+ # Use /dev/null for master_fd, triggering early return
+ # of _main, followed by _async_waitpid.
+ # TODO: Optimize away the need for master_fd here.
+ master_fd = os.open(os.devnull, os.O_RDONLY)
+ slave_fd = None
+ can_log = False
null_input = None
if not self.background or 0 in fd_pipes:
@@ -98,7 +112,9 @@ class SpawnProcess(SubProcess):
fd_pipes_orig = fd_pipes.copy()
- if log_file_path is not None or self.background:
+ if slave_fd is None:
+ pass
+ elif log_file_path is not None or self.background:
fd_pipes[1] = slave_fd
fd_pipes[2] = slave_fd
@@ -124,24 +140,16 @@ class SpawnProcess(SubProcess):
kwargs[k] = v
kwargs["fd_pipes"] = fd_pipes
- kwargs["returnpid"] = True
+ kwargs["returnproc"] = True
kwargs.pop("logfile", None)
- retval = self._spawn(self.args, **kwargs)
+ self._proc = self._spawn(self.args, **kwargs)
if slave_fd is not None:
os.close(slave_fd)
if null_input is not None:
os.close(null_input)
- if isinstance(retval, int):
- # spawn failed
- self.returncode = retval
- self._async_wait()
- return
-
- self.pid = retval[0]
-
if not fd_pipes:
self._registered = True
self._async_waitpid()
@@ -233,7 +241,9 @@ class SpawnProcess(SubProcess):
got_pty, master_fd, slave_fd = _create_pty_or_pipe(copy_term_size=stdout_pipe)
return (master_fd, slave_fd)
- def _spawn(self, args, **kwargs):
+ def _spawn(
+ self, args: list[str], **kwargs
+ ) -> portage.process.MultiprocessingProcess:
spawn_func = portage.process.spawn
if self._selinux_type is not None:
diff --git a/lib/_emerge/SubProcess.py b/lib/_emerge/SubProcess.py
index b734591d1..057e0adc2 100644
--- a/lib/_emerge/SubProcess.py
+++ b/lib/_emerge/SubProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -12,12 +12,19 @@ import errno
class SubProcess(AbstractPollTask):
- __slots__ = ("pid",) + ("_dummy_pipe_fd", "_files", "_waitpid_id")
+ __slots__ = ("_dummy_pipe_fd", "_files", "_proc", "_waitpid_id")
# This is how much time we allow for waitpid to succeed after
# we've sent a kill signal to our subprocess.
_cancel_timeout = 1 # seconds
+ def isAlive(self):
+ return (self._registered or self.pid is not None) and self.returncode is None
+
+ @property
+ def pid(self):
+ return None if self._proc is None else self._proc.pid
+
def _poll(self):
# Simply rely on _async_waitpid_cb to set the returncode.
return self.returncode
@@ -58,15 +65,11 @@ class SubProcess(AbstractPollTask):
if self.returncode is not None:
self._async_wait()
elif self._waitpid_id is None:
- self._waitpid_id = self.pid
- self.scheduler._asyncio_child_watcher.add_child_handler(
- self.pid, self._async_waitpid_cb
- )
-
- def _async_waitpid_cb(self, pid, returncode):
- if pid != self.pid:
- raise AssertionError(f"expected pid {self.pid}, got {pid}")
- self.returncode = returncode
+ self._waitpid_id = asyncio.ensure_future(self._proc.wait(), self.scheduler)
+ self._waitpid_id.add_done_callback(self._async_waitpid_cb)
+
+ def _async_waitpid_cb(self, future):
+ self.returncode = future.result()
self._async_wait()
def _orphan_process_warn(self):
@@ -80,7 +83,8 @@ class SubProcess(AbstractPollTask):
self._registered = False
if self._waitpid_id is not None:
- self.scheduler._asyncio_child_watcher.remove_child_handler(self._waitpid_id)
+ if not self._waitpid_id.done():
+ self._waitpid_id.cancel()
self._waitpid_id = None
if self._files is not None:
diff --git a/lib/_emerge/UnmergeDepPriority.py b/lib/_emerge/UnmergeDepPriority.py
index ff81eff46..b14f8b84e 100644
--- a/lib/_emerge/UnmergeDepPriority.py
+++ b/lib/_emerge/UnmergeDepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2013 Gentoo Foundation
+# Copyright 1999-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -6,23 +6,25 @@ from _emerge.AbstractDepPriority import AbstractDepPriority
class UnmergeDepPriority(AbstractDepPriority):
__slots__ = (
+ "cross",
"ignored",
"optional",
"satisfied",
)
"""
- Combination of properties Priority Category
-
- runtime_slot_op 0 HARD
- runtime -1 HARD
- runtime_post -2 HARD
- buildtime -3 SOFT
- (none of the above) -3 SOFT
- """
+ Combination of properties Priority Category
+
+ installtime 0 HARD
+ runtime_slot_op -1 HARD
+ runtime -2 HARD
+ runtime_post -3 HARD
+ buildtime -4 SOFT
+ (none of the above) -4 SOFT
+ """
MAX = 0
- SOFT = -3
- MIN = -3
+ SOFT = -4
+ MIN = -4
def __init__(self, **kwargs):
AbstractDepPriority.__init__(self, **kwargs)
@@ -30,19 +32,23 @@ class UnmergeDepPriority(AbstractDepPriority):
self.optional = True
def __int__(self):
- if self.runtime_slot_op:
+ if self.installtime:
return 0
- if self.runtime:
+ if self.runtime_slot_op:
return -1
- if self.runtime_post:
+ if self.runtime:
return -2
- if self.buildtime:
+ if self.runtime_post:
return -3
- return -3
+ if self.buildtime:
+ return -4
+ return -4
def __str__(self):
if self.ignored:
return "ignored"
+ if self.installtime:
+ return "install time"
if self.runtime_slot_op:
return "hard slot op"
myvalue = self.__int__()
diff --git a/lib/_emerge/actions.py b/lib/_emerge/actions.py
index dbd9707a8..d36a79924 100644
--- a/lib/_emerge/actions.py
+++ b/lib/_emerge/actions.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2021 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import collections
@@ -41,6 +41,7 @@ from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi.IndexedVardb import IndexedVardb
from portage.dep import Atom, _repo_separator, _slot_separator
+from portage.dep.libc import find_libc_deps
from portage.exception import (
InvalidAtom,
InvalidData,
@@ -547,8 +548,10 @@ def action_build(
mergelist_shown = True
if retval != os.EX_OK:
return retval
+ return os.EX_OK
- else:
+ gpg = None
+ try:
if not mergelist_shown:
# If we haven't already shown the merge list above, at
# least show warnings about missed updates and such.
@@ -687,8 +690,10 @@ def action_build(
ldpath_mtimes,
autoclean=1,
)
-
return retval
+ finally:
+ if gpg is not None:
+ gpg.stop()
def action_config(settings, trees, myopts, myfiles):
@@ -842,7 +847,7 @@ def action_depclean(
)
if not matched_packages:
writemsg_level(f">>> No packages selected for removal by {action}\n")
- return 0
+ return 1
# The calculation is done in a separate function so that depgraph
# references go out of scope and the corresponding memory
@@ -908,7 +913,16 @@ _depclean_result = collections.namedtuple(
)
-def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spinner):
+def _calc_depclean(
+ settings,
+ trees,
+ ldpath_mtimes,
+ myopts,
+ action,
+ args_set,
+ spinner,
+ frozen_config=None,
+):
allow_missing_deps = bool(args_set)
debug = "--debug" in myopts
@@ -987,12 +1001,14 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
writemsg_level("\nCalculating dependencies ")
resolver_params = create_depgraph_params(myopts, "remove")
- resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver = depgraph(
+ settings, trees, myopts, resolver_params, spinner, frozen_config=frozen_config
+ )
resolver._load_vdb()
vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
real_vardb = trees[eroot]["vartree"].dbapi
- if action == "depclean":
+ if action in ("dep_check", "depclean"):
if args_set:
if deselect:
# Start with an empty set.
@@ -1001,6 +1017,7 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
# Pull in any sets nested within the selected set.
selected_set.update(psets["selected"].getNonAtoms())
+ if args_set or action == "dep_check":
# Pull in everything that's installed but not matched
# by an argument atom since we don't want to clean any
# package if something depends on it.
@@ -1097,6 +1114,9 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
if not success:
return _depclean_result(1, [], False, 0, resolver)
+ if action == "dep_check":
+ return _depclean_result(0, [], False, 0, resolver)
+
def unresolved_deps():
soname_deps = set()
unresolvable = set()
@@ -1567,11 +1587,12 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
graph = digraph()
del cleanlist[:]
+ installtime = UnmergeDepPriority(installtime=True, runtime=True)
runtime = UnmergeDepPriority(runtime=True)
runtime_post = UnmergeDepPriority(runtime_post=True)
buildtime = UnmergeDepPriority(buildtime=True)
priority_map = {
- "IDEPEND": runtime,
+ "IDEPEND": installtime,
"RDEPEND": runtime,
"PDEPEND": runtime_post,
"BDEPEND": buildtime,
@@ -1641,7 +1662,11 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
if mypriority.runtime:
mypriority.runtime_slot_op = True
- graph.add(child_node, node, priority=mypriority)
+ # Drop direct circular deps because the unmerge order
+ # calculation does not handle them well as demonstrated
+ # by the test case for bug 916135.
+ if child_node is not node:
+ graph.add(child_node, node, priority=mypriority)
if debug:
writemsg_level("\nunmerge digraph:\n\n", noiselevel=-1, level=logging.DEBUG)
@@ -1678,6 +1703,8 @@ def _calc_depclean(settings, trees, ldpath_mtimes, myopts, action, args_set, spi
break
if not nodes:
raise AssertionError("no root nodes")
+ # Sort nodes for deterministic results.
+ nodes.sort(reverse=True)
if ignore_priority is not None:
# Some deps have been dropped due to circular dependencies,
# so only pop one node in order to minimize the number that
@@ -2786,10 +2813,8 @@ def relative_profile_path(portdir, abs_profile):
def get_libc_version(vardb: portage.dbapi.vartree.vardbapi) -> list[str]:
libcver = []
- libclist = set()
- for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
- if not atom.blocker:
- libclist.update(vardb.match(atom))
+ libclist = find_libc_deps(vardb, True)
+
if libclist:
for cpv in sorted(libclist):
libc_split = portage.catpkgsplit(cpv)[1:]
diff --git a/lib/_emerge/depgraph.py b/lib/_emerge/depgraph.py
index 966d702c4..a53eaaba9 100644
--- a/lib/_emerge/depgraph.py
+++ b/lib/_emerge/depgraph.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2023 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -36,6 +36,7 @@ from portage.dep import (
match_from_list,
_repo_separator,
)
+from portage.dep.libc import find_libc_deps, strip_libc_deps
from portage.dep._slot_operator import ignore_built_slot_operator_deps, strip_slots
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, _get_eapi_attrs
from portage.exception import (
@@ -407,7 +408,6 @@ class _use_changes(tuple):
class _dynamic_depgraph_config:
-
"""
``dynamic_depgraph_config`` is an object that is used to collect settings and important data structures that are
used in calculating Portage dependencies. Each depgraph created by the depgraph.py code gets its own
@@ -754,6 +754,7 @@ class depgraph:
def _dynamic_deps_preload(self, fake_vartree):
portdb = fake_vartree._portdb
+ config_pool = []
for pkg in fake_vartree.dbapi:
self._spinner_update()
self._dynamic_config._package_tracker.add_installed_pkg(pkg)
@@ -768,12 +769,22 @@ class depgraph:
if metadata is not None:
fake_vartree.dynamic_deps_preload(pkg, metadata)
else:
+ if config_pool:
+ settings = config_pool.pop()
+ else:
+ settings = portage.config(clone=portdb.settings)
+
+ deallocate_config = portdb._event_loop.create_future()
+ deallocate_config.add_done_callback(
+ lambda future: config_pool.append(future.result())
+ )
proc = EbuildMetadataPhase(
cpv=pkg.cpv,
ebuild_hash=ebuild_hash,
portdb=portdb,
repo_path=repo_path,
settings=portdb.doebuild_settings,
+ deallocate_config=deallocate_config,
)
proc.addExitListener(self._dynamic_deps_proc_exit(pkg, fake_vartree))
yield proc
@@ -1255,9 +1266,10 @@ class depgraph:
# We don't want to list the same USE flags for multiple build IDs
seen.setdefault(pkg.root, dict())
- if (pkg.root, pkg.cpv) not in seen or flag_display not in seen[pkg.root][
- pkg.cpv
- ]:
+ if (
+ pkg.cpv not in seen[pkg.root]
+ or flag_display not in seen[pkg.root][pkg.cpv]
+ ):
seen[pkg.root].setdefault(pkg.cpv, set()).add(flag_display)
# The user can paste this line into package.use
messages.append(f" ={pkg.cpv} {flag_display}")
@@ -1288,17 +1300,33 @@ class depgraph:
writemsg(line + "\n", noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
- writemsg(
- "\n!!! The following binary packages have been "
- "ignored due to changed dependencies:\n\n",
- noiselevel=-1,
- )
+ merging = {
+ (pkg.root, pkg.cpv)
+ for pkg in self._dynamic_config._displayed_list or ()
+ if isinstance(pkg, Package)
+ }
+ messages = []
for pkg in changed_deps:
+ # Don't include recursive deps which aren't in the merge list anyway.
+ if (pkg.root, pkg.cpv) not in merging:
+ continue
+
msg = f" {pkg.cpv}{_repo_separator}{pkg.repo}"
if pkg.root_config.settings["ROOT"] != "/":
msg += f" for {pkg.root}"
- writemsg(f"{msg}\n", noiselevel=-1)
+ messages.append(f"{msg}\n")
+
+ if not messages:
+ return
+
+ writemsg(
+ "\n!!! The following binary packages have been "
+ "ignored due to changed dependencies:\n\n",
+ noiselevel=-1,
+ )
+ for line in messages:
+ writemsg(line, noiselevel=-1)
msg = [
"",
@@ -2949,6 +2977,23 @@ class depgraph:
return flags
return None
+ def _installed_libc_deps(self, eroot):
+ """
+ Return find_libc_deps result for installed packages from the
+ given EROOT.
+ """
+ try:
+ return self._frozen_config._libc_deps_cache[eroot]
+ except (AttributeError, KeyError) as e:
+ if isinstance(e, AttributeError):
+ self._frozen_config._libc_deps_cache = {}
+
+ self._frozen_config._libc_deps_cache[eroot] = find_libc_deps(
+ self._frozen_config._trees_orig[eroot]["vartree"].dbapi,
+ False,
+ )
+ return self._frozen_config._libc_deps_cache[eroot]
+
def _changed_deps(self, pkg):
ebuild = None
try:
@@ -2968,6 +3013,8 @@ class depgraph:
else:
depvars = Package._runtime_keys
+ libc_deps = self._installed_libc_deps(pkg.root)
+
# Use _raw_metadata, in order to avoid interaction
# with --dynamic-deps.
try:
@@ -2980,6 +3027,10 @@ class depgraph:
token_class=Atom,
)
strip_slots(dep_struct)
+ # This strip_libc_deps call is done with non-realized deps;
+ # we can change that later if we're having trouble with
+ # matching/intersecting them.
+ strip_libc_deps(dep_struct, libc_deps)
built_deps.append(dep_struct)
except InvalidDependString:
changed = True
@@ -2993,6 +3044,10 @@ class depgraph:
token_class=Atom,
)
strip_slots(dep_struct)
+ # This strip_libc_deps call is done with non-realized deps;
+ # we can change that later if we're having trouble with
+ # matching/intersecting them.
+ strip_libc_deps(dep_struct, libc_deps)
unbuilt_deps.append(dep_struct)
changed = built_deps != unbuilt_deps
@@ -3614,7 +3669,7 @@ class depgraph:
blocker=False,
depth=depth,
parent=pkg,
- priority=self._priority(runtime=True),
+ priority=self._priority(cross=self._cross(pkg.root), runtime=True),
root=pkg.root,
)
if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
@@ -3629,9 +3684,19 @@ class depgraph:
careful to obey the user's wishes if they have explicitly requested
for a package to be rebuilt or reinstalled for some reason.
"""
- if "empty" in self._dynamic_config.myparams:
+ # Skip for slot conflicts since the merge list is not valid
+ # anyway, and possible state inconsistencies can trigger
+ # unexpected exceptions as in bug 922038.
+ if "empty" in self._dynamic_config.myparams or any(
+ self._dynamic_config._package_tracker.slot_conflicts()
+ ):
return False
+ # Track packages that we remove from the graph during
+ # this method call, in order to help trace any detected
+ # inconsistency back to this method or some other source
+ # such as _solve_non_slot_operator_slot_conflicts.
+ removed = []
modified = False
selective = "selective" in self._dynamic_config.myparams
for root, atom in self._dynamic_config._slot_operator_replace_installed:
@@ -3688,6 +3753,7 @@ class depgraph:
if pkg.requires != installed_instance.requires:
continue
+ libc_deps = self._installed_libc_deps(pkg.root)
depvars = Package._dep_keys
try:
installed_deps = []
@@ -3698,6 +3764,7 @@ class depgraph:
eapi=pkg.eapi,
token_class=Atom,
)
+ strip_libc_deps(dep_struct, libc_deps)
installed_deps.append(dep_struct)
except InvalidDependString:
continue
@@ -3721,6 +3788,7 @@ class depgraph:
eapi=pkg.eapi,
token_class=Atom,
)
+ strip_libc_deps(dep_struct, libc_deps)
new_deps.append(dep_struct)
if new_deps != installed_deps:
@@ -3729,11 +3797,55 @@ class depgraph:
modified = True
parent_atoms = []
for parent, parent_atom in self._dynamic_config._parent_atoms[pkg]:
- priorities = self._dynamic_config.digraph.nodes[pkg][1][parent][:]
+ try:
+ priorities = self._dynamic_config.digraph.nodes[pkg][1][parent][
+ :
+ ]
+ except KeyError:
+ optional_msg = " ({} previously removed from graph)"
+ warnings.warn(
+ f"_eliminate_rebuilds inconsistency: parent priorities missing for {parent} -> {pkg} edge"
+ + (
+ optional_msg.format("parent and child")
+ if parent in removed and pkg in removed
+ else (
+ optional_msg.format("parent")
+ if parent in removed
+ else (
+ optional_msg.format("child")
+ if pkg in removed
+ else ""
+ )
+ )
+ )
+ )
+ priorities = []
parent_atoms.append((parent, parent_atom, priorities))
child_parents = {}
for child in self._dynamic_config.digraph.child_nodes(pkg):
- priorities = self._dynamic_config.digraph.nodes[child][1][pkg][:]
+ try:
+ priorities = self._dynamic_config.digraph.nodes[child][1][pkg][
+ :
+ ]
+ except KeyError:
+ optional_msg = " ({} previously removed from graph)"
+ warnings.warn(
+ f"_eliminate_rebuilds inconsistency: parent priorities missing for {pkg} -> {child} edge"
+ + (
+ optional_msg.format("parent and child")
+ if pkg in removed and child in removed
+ else (
+ optional_msg.format("parent")
+ if pkg in removed
+ else (
+ optional_msg.format("child")
+ if child in removed
+ else ""
+ )
+ )
+ )
+ )
+ priorities = []
child_parents[child] = (
[
atom
@@ -3745,6 +3857,7 @@ class depgraph:
priorities,
)
self._remove_pkg(pkg, remove_orphans=False)
+ removed.append(pkg)
for parent, atom, priorities in parent_atoms:
self._add_parent_atom(installed_instance, (parent, atom))
for priority in priorities:
@@ -3952,17 +4065,28 @@ class depgraph:
# _dep_disjunctive_stack first, so that choices for build-time
# deps influence choices for run-time deps (bug 639346).
deps = (
- (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
+ (
+ myroot,
+ edepend["RDEPEND"],
+ self._priority(cross=self._cross(pkg.root), runtime=True),
+ ),
(
self._frozen_config._running_root.root,
edepend["IDEPEND"],
- self._priority(runtime=True),
+ self._priority(
+ cross=self._cross(pkg.root), installtime=True, runtime=True
+ ),
+ ),
+ (
+ myroot,
+ edepend["PDEPEND"],
+ self._priority(cross=self._cross(pkg.root), runtime_post=True),
),
- (myroot, edepend["PDEPEND"], self._priority(runtime_post=True)),
(
depend_root,
edepend["DEPEND"],
self._priority(
+ cross=self._cross(pkg.root),
buildtime=True,
optional=(pkg.built or ignore_depend_deps),
ignored=ignore_depend_deps,
@@ -3972,6 +4096,7 @@ class depgraph:
self._frozen_config._running_root.root,
edepend["BDEPEND"],
self._priority(
+ cross=self._cross(pkg.root),
buildtime=True,
optional=(pkg.built or ignore_bdepend_deps),
ignored=ignore_bdepend_deps,
@@ -4020,7 +4145,9 @@ class depgraph:
self._queue_disjunctive_deps(
pkg,
dep_root,
- self._priority(runtime_post=True),
+ self._priority(
+ cross=self._cross(pkg.root), runtime_post=True
+ ),
test_deps,
)
)
@@ -4028,7 +4155,9 @@ class depgraph:
if test_deps and not self._add_pkg_dep_string(
pkg,
dep_root,
- self._priority(runtime_post=True),
+ self._priority(
+ cross=self._cross(pkg.root), runtime_post=True
+ ),
test_deps,
allow_unsatisfied,
):
@@ -4343,7 +4472,10 @@ class depgraph:
return 0
for atom, child in self._minimize_children(
- pkg, self._priority(runtime=True), root_config, atoms
+ pkg,
+ self._priority(cross=self._cross(pkg.root), runtime=True),
+ root_config,
+ atoms,
):
# If this was a specially generated virtual atom
# from dep_check, map it back to the original, in
@@ -4353,7 +4485,7 @@ class depgraph:
atom = getattr(atom, "_orig_atom", atom)
# This is a GLEP 37 virtual, so its deps are all runtime.
- mypriority = self._priority(runtime=True)
+ mypriority = self._priority(cross=self._cross(pkg.root), runtime=True)
if not atom.blocker:
inst_pkgs = [
inst_pkg
@@ -4600,6 +4732,13 @@ class depgraph:
priority_constructor = DepPriority
return priority_constructor(**kwargs)
+ def _cross(self, eroot):
+ """
+ Returns True if the ROOT for the given EROOT is not /,
+ or EROOT is cross-prefix.
+ """
+ return eroot != self._frozen_config._running_root.root
+
def _dep_expand(self, root_config, atom_without_category):
"""
@param root_config: a root config instance
@@ -5709,9 +5848,9 @@ class depgraph:
self._select_atoms_parent = parent
mytrees["parent"] = parent
mytrees["atom_graph"] = atom_graph
- mytrees[
- "circular_dependency"
- ] = self._dynamic_config._circular_dependency
+ mytrees["circular_dependency"] = (
+ self._dynamic_config._circular_dependency
+ )
if priority is not None:
mytrees["priority"] = priority
@@ -5772,7 +5911,9 @@ class depgraph:
node_priority = priority.copy()
else:
# virtuals only have runtime deps
- node_priority = self._priority(runtime=True)
+ node_priority = self._priority(
+ cross=self._cross(node_parent.root), runtime=True
+ )
k = Dependency(
atom=parent_atom,
@@ -5858,7 +5999,7 @@ class depgraph:
pkg._metadata.get("RDEPEND", ""),
myuse=self._pkg_use_enabled(pkg),
parent=pkg,
- priority=self._priority(runtime=True),
+ priority=self._priority(cross=self._cross(pkg.root), runtime=True),
)
except InvalidDependString as e:
if not pkg.installed:
@@ -8805,6 +8946,13 @@ class depgraph:
return True
def _accept_blocker_conflicts(self):
+ """
+ Always returns False when backtracking is enabled, for
+ consistent results. When backtracking is disabled, returns
+ True for options that tolerate conflicts.
+ """
+ if self._dynamic_config._allow_backtracking:
+ return False
acceptable = False
for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri", "--nodeps"):
if x in self._frozen_config.myopts:
@@ -10416,24 +10564,24 @@ class depgraph:
filename = "package.accept_keywords"
else:
filename = "package.keywords"
- file_to_write_to[
- (abs_user_config, "package.keywords")
- ] = find_config_file(abs_user_config, filename)
+ file_to_write_to[(abs_user_config, "package.keywords")] = (
+ find_config_file(abs_user_config, filename)
+ )
if root in p_mask_change_msg:
- file_to_write_to[
- (abs_user_config, "package.unmask")
- ] = find_config_file(abs_user_config, "package.unmask")
+ file_to_write_to[(abs_user_config, "package.unmask")] = (
+ find_config_file(abs_user_config, "package.unmask")
+ )
if root in use_changes_msg:
- file_to_write_to[
- (abs_user_config, "package.use")
- ] = find_config_file(abs_user_config, "package.use")
+ file_to_write_to[(abs_user_config, "package.use")] = (
+ find_config_file(abs_user_config, "package.use")
+ )
if root in license_msg:
- file_to_write_to[
- (abs_user_config, "package.license")
- ] = find_config_file(abs_user_config, "package.license")
+ file_to_write_to[(abs_user_config, "package.license")] = (
+ find_config_file(abs_user_config, "package.license")
+ )
for (abs_user_config, f), path in file_to_write_to.items():
if path is None:
@@ -11647,6 +11795,7 @@ def backtrack_depgraph(
myaction: Optional[str],
myfiles: list[str],
spinner: "_emerge.stdout_spinner.stdout_spinner",
+ frozen_config: Optional[_frozen_depgraph_config] = None,
) -> tuple[Any, depgraph, list[str]]:
"""
@@ -11671,16 +11820,21 @@ def _backtrack_depgraph(
myaction: Optional[str],
myfiles: list[str],
spinner: "_emerge.stdout_spinner.stdout_spinner",
+ frozen_config: Optional[_frozen_depgraph_config] = None,
) -> tuple[Any, depgraph, list[str], int, int]:
debug = "--debug" in myopts
mydepgraph = None
- max_retries = myopts.get("--backtrack", 20)
+ nodeps = "--nodeps" in myopts
+ max_retries = 0 if nodeps else myopts.get("--backtrack", 20)
max_depth = max(1, (max_retries + 1) // 2)
allow_backtracking = max_retries > 0
backtracker = Backtracker(max_depth)
backtracked = 0
- frozen_config = _frozen_depgraph_config(settings, trees, myopts, myparams, spinner)
+ if frozen_config is None:
+ frozen_config = _frozen_depgraph_config(
+ settings, trees, myopts, myparams, spinner
+ )
while backtracker:
if debug and mydepgraph is not None:
@@ -11851,7 +12005,7 @@ def _resume_depgraph(
) or parent_node.operation not in ("merge", "nomerge"):
continue
# We need to traverse all priorities here, in order to
- # ensure that a package with an unsatisfied depenedency
+ # ensure that a package with an unsatisfied dependency
# won't get pulled in, even indirectly via a soft
# dependency.
unsatisfied_stack.append((parent_node, atom))
diff --git a/lib/_emerge/resolver/circular_dependency.py b/lib/_emerge/resolver/circular_dependency.py
index c88b18a57..6c2142308 100644
--- a/lib/_emerge/resolver/circular_dependency.py
+++ b/lib/_emerge/resolver/circular_dependency.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -132,9 +132,20 @@ class circular_dependency_handler:
for ppkg, atom in parent_atoms:
if ppkg == parent:
changed_parent = ppkg
- parent_atom = atom.unevaluated_atom
+ parent_atom = atom
break
+ if parent_atom.package:
+ parent_atom = parent_atom.unevaluated_atom
+ else:
+ # Treat soname deps as unconditional for now. In some
+ # cases they can be avoided by a rebuild with changed
+ # USE, but ebuilds sometimes do not specify the
+ # corresponding conditional dependency (especially for
+ # system packages like gcc which provides libstdc++.so.6
+ # and libgcc_s.so.1).
+ continue
+
try:
affecting_use = extract_affecting_use(
dep, parent_atom, eapi=parent.eapi
@@ -178,7 +189,7 @@ class circular_dependency_handler:
if len(affecting_use) > self.MAX_AFFECTING_USE:
# Limit the number of combinations explored (bug #555698).
- # First, discard irrelevent flags that are not enabled.
+ # First, discard irrelevant flags that are not enabled.
# Since extract_affecting_use doesn't distinguish between
# positive and negative effects (flag? vs. !flag?), assume
# a positive relationship.
@@ -243,8 +254,8 @@ class circular_dependency_handler:
continue
# Check if a USE change conflicts with use requirements of the parents.
- # If a requiremnet is hard, ignore the suggestion.
- # If the requirment is conditional, warn the user that other changes might be needed.
+ # If a requirement is hard, ignore the suggestion.
+ # If the requirement is conditional, warn the user that other changes might be needed.
followup_change = False
parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(
changed_parent
diff --git a/lib/_emerge/resolver/slot_collision.py b/lib/_emerge/resolver/slot_collision.py
index 7e579f394..d4f7018ea 100644
--- a/lib/_emerge/resolver/slot_collision.py
+++ b/lib/_emerge/resolver/slot_collision.py
@@ -446,17 +446,17 @@ class slot_conflict_handler:
modified_use=self.depgraph._pkg_use_enabled(ppkg),
):
selected_for_display.add((ppkg, atom))
- need_rebuild[
- ppkg
- ] = "matched by --useoldpkg-atoms argument"
+ need_rebuild[ppkg] = (
+ "matched by --useoldpkg-atoms argument"
+ )
elif usepkgonly:
# This case is tricky, so keep quiet in order to avoid false-positives.
pass
elif not self.depgraph._equiv_ebuild_visible(ppkg):
selected_for_display.add((ppkg, atom))
- need_rebuild[
- ppkg
- ] = "ebuild is masked or unavailable"
+ need_rebuild[ppkg] = (
+ "ebuild is masked or unavailable"
+ )
for ppkg, atom, other_pkg in parents:
selected_for_display.add((ppkg, atom))
diff --git a/lib/portage/_compat_upgrade/binpkg_format.py b/lib/portage/_compat_upgrade/binpkg_format.py
new file mode 100644
index 000000000..6ad24799c
--- /dev/null
+++ b/lib/portage/_compat_upgrade/binpkg_format.py
@@ -0,0 +1,51 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+
+import portage
+from portage import os
+from portage.const import GLOBAL_CONFIG_PATH
+
+COMPAT_BINPKG_FORMAT = "xpak"
+
+
+def main():
+ """
+ If the current installation is still configured to use the old
+ default BINPKG_FORMAT=xpak setting, then patch make.globals
+ inside ${ED} to maintain backward compatibility, ensuring that
+ binary package consumers are not caught off guard. This is
+ intended to be called from the ebuild as follows:
+
+ pkg_preinst() {
+ python_setup
+ env -u BINPKG_FORMAT
+ PYTHONPATH="${D%/}$(python_get_sitedir)${PYTHONPATH:+:${PYTHONPATH}}" \
+ "${PYTHON}" -m portage._compat_upgrade.binpkg_format || die
+ }
+ """
+ if (
+ portage.settings.get("BINPKG_FORMAT", COMPAT_BINPKG_FORMAT)
+ == COMPAT_BINPKG_FORMAT
+ ):
+ config_path = os.path.join(
+ os.environ["ED"], GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals"
+ )
+ with open(config_path) as f:
+ content = f.read()
+ compat_setting = f'BINPKG_FORMAT="{COMPAT_BINPKG_FORMAT}"'
+ portage.output.EOutput().einfo(
+ "Setting make.globals default {} for backward compatibility".format(
+ compat_setting
+ )
+ )
+ content = re.sub(
+ "^BINPKG_FORMAT=.*$", compat_setting, content, flags=re.MULTILINE
+ )
+ with open(config_path, "w") as f:
+ f.write(content)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/portage/_compat_upgrade/meson.build b/lib/portage/_compat_upgrade/meson.build
index 365bd49ff..6db0981b9 100644
--- a/lib/portage/_compat_upgrade/meson.build
+++ b/lib/portage/_compat_upgrade/meson.build
@@ -1,6 +1,7 @@
py.install_sources(
[
'binpkg_compression.py',
+ 'binpkg_format.py',
'binpkg_multi_instance.py',
'default_locations.py',
'__init__.py',
diff --git a/lib/portage/_emirrordist/DeletionIterator.py b/lib/portage/_emirrordist/DeletionIterator.py
index 4494b7b24..bed40e935 100644
--- a/lib/portage/_emirrordist/DeletionIterator.py
+++ b/lib/portage/_emirrordist/DeletionIterator.py
@@ -27,9 +27,11 @@ class DeletionIterator:
distfiles_set = set()
distfiles_set.update(
(
- filename
- if isinstance(filename, DistfileName)
- else DistfileName(filename)
+ (
+ filename
+ if isinstance(filename, DistfileName)
+ else DistfileName(filename)
+ )
for filename in itertools.chain.from_iterable(
layout.get_filenames(distdir) for layout in self._config.layouts
)
diff --git a/lib/portage/_emirrordist/FetchIterator.py b/lib/portage/_emirrordist/FetchIterator.py
index 54a058bb5..e4fdd092a 100644
--- a/lib/portage/_emirrordist/FetchIterator.py
+++ b/lib/portage/_emirrordist/FetchIterator.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2018 Gentoo Foundation
+# Copyright 2013-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import threading
@@ -14,6 +14,7 @@ from portage.exception import PortageException, PortageKeyError
from portage.package.ebuild.fetch import DistfileName
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util.futures import asyncio
from portage.util.futures.iter_completed import iter_gather
from .FetchTask import FetchTask
from _emerge.CompositeTask import CompositeTask
@@ -276,8 +277,11 @@ def _async_fetch_tasks(config, hash_filter, repo_config, digests_future, cpv, lo
result.set_result(fetch_tasks)
def future_generator():
- yield config.portdb.async_aux_get(
- cpv, ("RESTRICT",), myrepo=repo_config.name, loop=loop
+ yield asyncio.ensure_future(
+ config.portdb.async_aux_get(
+ cpv, ("RESTRICT",), myrepo=repo_config.name, loop=loop
+ ),
+ loop,
)
yield config.portdb.async_fetch_map(cpv, mytree=repo_config.location, loop=loop)
@@ -292,9 +296,11 @@ def _async_fetch_tasks(config, hash_filter, repo_config, digests_future, cpv, lo
)
gather_result.add_done_callback(aux_get_done)
result.add_done_callback(
- lambda result: gather_result.cancel()
- if result.cancelled() and not gather_result.done()
- else None
+ lambda result: (
+ gather_result.cancel()
+ if result.cancelled() and not gather_result.done()
+ else None
+ )
)
return result
diff --git a/lib/portage/_global_updates.py b/lib/portage/_global_updates.py
index ba183e87b..f7997fc37 100644
--- a/lib/portage/_global_updates.py
+++ b/lib/portage/_global_updates.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import stat
@@ -38,9 +38,14 @@ def _global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if secpass < 2 or "SANDBOX_ACTIVE" in os.environ or len(trees) != 1:
return False
- return _do_global_updates(
- trees, prev_mtimes, quiet=quiet, if_mtime_changed=if_mtime_changed
- )
+ vardb = trees[trees._running_eroot]["vartree"].dbapi
+ vardb.lock()
+ try:
+ return _do_global_updates(
+ trees, prev_mtimes, quiet=quiet, if_mtime_changed=if_mtime_changed
+ )
+ finally:
+ vardb.unlock()
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
@@ -127,7 +132,7 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
retupd = True
if retupd:
- if os.access(bindb.bintree.pkgdir, os.W_OK):
+ if bindb.writable:
# Call binarytree.populate(), since we want to make sure it's
# only populated with local packages here (getbinpkgs=0).
bindb.bintree.populate()
diff --git a/lib/portage/_selinux.py b/lib/portage/_selinux.py
index bf6ad2489..5ae1b4e71 100644
--- a/lib/portage/_selinux.py
+++ b/lib/portage/_selinux.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2020 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# Don't use the unicode-wrapped os and shutil modules here since
@@ -6,6 +6,7 @@
import os
import shutil
import warnings
+from functools import partial
try:
import selinux
@@ -134,14 +135,12 @@ class spawn_wrapper:
def __call__(self, *args, **kwargs):
if self._con is not None:
- pre_exec = kwargs.get("pre_exec")
-
- def _pre_exec():
- if pre_exec is not None:
- pre_exec()
- setexec(self._con)
-
- kwargs["pre_exec"] = _pre_exec
+ pre_exec = partial(setexec, self._con)
+ kwargs["pre_exec"] = (
+ portage.process._chain_pre_exec_fns(pre_exec, kwargs["pre_exec"])
+ if kwargs.get("pre_exec")
+ else pre_exec
+ )
return self._spawn_func(*args, **kwargs)
diff --git a/lib/portage/_sets/dbapi.py b/lib/portage/_sets/dbapi.py
index b3d7c5ffb..9c5b979a9 100644
--- a/lib/portage/_sets/dbapi.py
+++ b/lib/portage/_sets/dbapi.py
@@ -172,7 +172,7 @@ class VariableSet(EverythingSet):
for include in self._includes:
include_atoms.append(Atom(include))
- for x in use_reduce(values, token_class=Atom):
+ for x in use_reduce(values, token_class=Atom, flat=True):
if not isinstance(x, Atom):
continue
diff --git a/lib/portage/_sets/libs.py b/lib/portage/_sets/libs.py
index 9636b9d2c..860844235 100644
--- a/lib/portage/_sets/libs.py
+++ b/lib/portage/_sets/libs.py
@@ -33,7 +33,6 @@ class LibraryConsumerSet(PackageSet):
class LibraryFileConsumerSet(LibraryConsumerSet):
-
"""
Note: This does not detect libtool archive (*.la) files that consume the
specified files (revdep-rebuild is able to detect them).
diff --git a/lib/portage/binpkg.py b/lib/portage/binpkg.py
index 2078e3ca5..9ecd52cf3 100644
--- a/lib/portage/binpkg.py
+++ b/lib/portage/binpkg.py
@@ -67,7 +67,7 @@ def get_binpkg_format(binpkg_path, check_file=False, remote=False):
writemsg(
colorize(
"WARN",
- "File {} binpkg format mismatch, actual format: {}".format(
+ "File {} binpkg format mismatch, actual format: {}\n".format(
binpkg_path, file_format
),
)
diff --git a/lib/portage/cache/anydbm.py b/lib/portage/cache/anydbm.py
index 94a270a48..ad7042ae4 100644
--- a/lib/portage/cache/anydbm.py
+++ b/lib/portage/cache/anydbm.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2020 Gentoo Authors
+# Copyright 2005-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
@@ -67,6 +67,21 @@ class database(fs_template.FsBased):
raise cache_errors.InitializationError(self.__class__, e)
self._ensure_access(self._db_path)
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ # These attributes are not picklable, so they are automatically
+ # regenerated after unpickling.
+ state["_database__db"] = None
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ mode = "w"
+ if dbm.whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+ # Allow multiple concurrent writers (see bug #53607).
+ mode += "u"
+ self.__db = dbm.open(self._db_path, mode, self._perms)
+
def iteritems(self):
# dbm doesn't implement items()
for k in self.__db.keys():
diff --git a/lib/portage/const.py b/lib/portage/const.py
index 8769ab270..1909199ef 100644
--- a/lib/portage/const.py
+++ b/lib/portage/const.py
@@ -1,5 +1,5 @@
# portage: Constants
-# Copyright 1998-2023 Gentoo Authors
+# Copyright 1998-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# BEGIN PREFIX LOCAL
@@ -197,6 +197,7 @@ SUPPORTED_FEATURES = frozenset(
"compressdebug",
"compress-index",
"config-protect-if-modified",
+ "dedupdebug",
"digest",
"distcc",
"distlocks",
@@ -215,6 +216,7 @@ SUPPORTED_FEATURES = frozenset(
"keepwork",
"lmirror",
"merge-sync",
+ "merge-wait",
"metadata-transfer",
"mirror",
"mount-sandbox",
diff --git a/lib/portage/dbapi/__init__.py b/lib/portage/dbapi/__init__.py
index 09163e94d..6f95b93a2 100644
--- a/lib/portage/dbapi/__init__.py
+++ b/lib/portage/dbapi/__init__.py
@@ -25,7 +25,11 @@ from portage.const import MERGING_IDENTIFIER
from portage import os
from portage import auxdbkeys
from portage.eapi import _get_eapi_attrs
-from portage.exception import InvalidBinaryPackageFormat, InvalidData
+from portage.exception import (
+ CorruptionKeyError,
+ InvalidBinaryPackageFormat,
+ InvalidData,
+)
from portage.localization import _
from _emerge.Package import Package
@@ -424,7 +428,7 @@ class dbapi:
if metadata_updates:
try:
aux_update(cpv, metadata_updates)
- except InvalidBinaryPackageFormat as e:
+ except (InvalidBinaryPackageFormat, CorruptionKeyError) as e:
warnings.warn(e)
if onUpdate:
onUpdate(maxval, i + 1)
@@ -470,5 +474,9 @@ class dbapi:
):
newslot = f"{newslot}/{mycpv.sub_slot}"
mydata = {"SLOT": newslot + "\n"}
- self.aux_update(mycpv, mydata)
+ try:
+ self.aux_update(mycpv, mydata)
+ except CorruptionKeyError as e:
+ warnings.warn(e)
+ continue
return moves
diff --git a/lib/portage/dbapi/bintree.py b/lib/portage/dbapi/bintree.py
index 9d09db3fe..005681adc 100644
--- a/lib/portage/dbapi/bintree.py
+++ b/lib/portage/dbapi/bintree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2023 Gentoo Authors
+# Copyright 1998-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["bindbapi", "binarytree"]
@@ -37,6 +37,7 @@ from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import (
AlarmSignal,
+ CorruptionKeyError,
InvalidPackageName,
InvalidBinaryPackageFormat,
ParseError,
@@ -84,9 +85,12 @@ class bindbapi(fakedbapi):
_known_keys = frozenset(
list(fakedbapi._known_keys) + ["CHOST", "repository", "USE"]
)
+ # Must include keys used to create _pkg_str attributes used in
+ # the fakedbapi _instance_key_multi_instance method.
_pkg_str_aux_keys = fakedbapi._pkg_str_aux_keys + (
"BUILD_ID",
"BUILD_TIME",
+ "SIZE",
"_mtime_",
)
@@ -209,9 +213,9 @@ class bindbapi(fakedbapi):
raise KeyError(mycpv)
binpkg_path = os.path.join(self.bintree.pkgdir, binpkg_path)
try:
- st = os.lstat(binpkg_path)
- except OSError:
- raise KeyError(mycpv)
+ st = os.stat(binpkg_path)
+ except OSError as oe:
+ raise CorruptionKeyError(mycpv) from oe
binpkg_format = get_binpkg_format(binpkg_path)
if binpkg_format == "xpak":
@@ -282,8 +286,10 @@ class bindbapi(fakedbapi):
cpv_str += f"-{build_id}"
binpkg_path = self.bintree.getname(cpv)
- if not os.path.exists(binpkg_path):
- raise KeyError(cpv)
+ try:
+ os.stat(binpkg_path)
+ except OSError as oe:
+ raise CorruptionKeyError(cpv) from oe
binpkg_format = get_binpkg_format(binpkg_path)
if binpkg_format == "xpak":
@@ -292,7 +298,21 @@ class bindbapi(fakedbapi):
encoding_key = True
elif binpkg_format == "gpkg":
mybinpkg = portage.gpkg.gpkg(self.settings, cpv_str, binpkg_path)
- mydata = mybinpkg.get_metadata()
+ try:
+ mydata = mybinpkg.get_metadata()
+ signature_exist = mybinpkg.signature_exist
+ except SignatureException:
+ signature_exist = True
+ if signature_exist:
+ writemsg(
+ colorize(
+ "WARN",
+ f"Binpkg update ignored for signed package: {binpkg_path}, "
+ "the file will be removed.\n",
+ )
+ )
+ self.bintree.remove(cpv)
+ return
encoding_key = False
else:
raise InvalidBinaryPackageFormat(
@@ -689,13 +709,23 @@ class binarytree:
continue
binpkg_path = self.getname(mycpv)
- if os.path.exists(binpkg_path) and not os.access(binpkg_path, os.W_OK):
+ try:
+ os.stat(binpkg_path)
+ except FileNotFoundError:
+ writemsg(_("!!! File not found: %s\n") % binpkg_path, noiselevel=-1)
+ continue
+ except OSError as oe:
+ writemsg(
+ _("!!! File os error (path %s): %s\n") % (binpkg_path, oe),
+ noiselevel=-1,
+ )
+ continue
+ if not os.access(binpkg_path, os.W_OK):
writemsg(
_("!!! Cannot update readonly binary: %s\n") % mycpv, noiselevel=-1
)
continue
- moves += 1
binpkg_format = get_binpkg_format(binpkg_path)
if binpkg_format == "xpak":
mytbz2 = portage.xpak.tbz2(binpkg_path)
@@ -703,12 +733,16 @@ class binarytree:
decode_metadata_name = False
elif binpkg_format == "gpkg":
mybinpkg = portage.gpkg.gpkg(self.settings, mycpv, binpkg_path)
- mydata = mybinpkg.get_metadata()
- if mybinpkg.signature_exist:
+ try:
+ mydata = mybinpkg.get_metadata()
+ signature_exist = mybinpkg.signature_exist
+ except SignatureException:
+ signature_exist = True
+ if signature_exist:
writemsg(
colorize(
"WARN",
- f"Binpkg update ignored for signed package: {binpkg_path}",
+ f"Binpkg update ignored for signed package: {binpkg_path}\n",
)
)
continue
@@ -716,6 +750,8 @@ class binarytree:
else:
continue
+ moves += 1
+
updated_items = update_dbentries([mylist], mydata, parent=mycpv)
mydata.update(updated_items)
if decode_metadata_name:
@@ -1314,7 +1350,7 @@ class binarytree:
# when binpackages are involved, not only when we refuse unsigned
# ones. (If the keys have expired we end up refusing signed but
# technically invalid packages...)
- if not pretend:
+ if not pretend and self.dbapi.writable:
self._run_trust_helper()
gpkg_only = True
else:
@@ -1324,23 +1360,13 @@ class binarytree:
for repo in reversed(list(self._binrepos_conf.values())):
base_url = repo.sync_uri
parsed_url = urlparse(base_url)
- host = parsed_url.netloc
+ host = parsed_url.hostname or ""
port = parsed_url.port
- user = None
- passwd = None
- user_passwd = ""
+ user = parsed_url.username
+ passwd = parsed_url.password
+ user_passwd = user + "@" if user else ""
gpkg_only_warned = False
- if "@" in host:
- user, host = host.split("@", 1)
- user_passwd = user + "@"
- if ":" in user:
- user, passwd = user.split(":", 1)
-
- if port is not None:
- port_str = f":{port}"
- if host.endswith(port_str):
- host = host[: -len(port_str)]
pkgindex_file = os.path.join(
self.settings["EROOT"],
CACHE_PATH,
@@ -1405,15 +1431,18 @@ class binarytree:
# Don't use urlopen for https, unless
# PEP 476 is supported (bug #469888).
- if repo.fetchcommand is None and (
- parsed_url.scheme not in ("https",) or _have_pep_476()
- ):
+ if (
+ repo.fetchcommand is None or parsed_url.scheme in ("", "file")
+ ) and (parsed_url.scheme not in ("https",) or _have_pep_476()):
try:
- f = _urlopen(
- url, if_modified_since=local_timestamp, proxies=proxies
- )
- if hasattr(f, "headers") and f.headers.get("timestamp", ""):
- remote_timestamp = f.headers.get("timestamp")
+ if parsed_url.scheme in ("", "file"):
+ f = open(f"{parsed_url.path.rstrip('/')}/Packages", "rb")
+ else:
+ f = _urlopen(
+ url, if_modified_since=local_timestamp, proxies=proxies
+ )
+ if hasattr(f, "headers") and f.headers.get("timestamp", ""):
+ remote_timestamp = f.headers.get("timestamp")
except OSError as err:
if (
hasattr(err, "code") and err.code == 304
@@ -1775,6 +1804,57 @@ class binarytree:
return cpv
+ def remove(self, cpv: portage.versions._pkg_str) -> None:
+ """
+ Remove a package instance and update internal state including
+ the package index. This will raise a KeyError if cpv is not
+ found in the internal state. It will display a warning message
+ if the package file was not found on disk, since it could have
+ been removed by another process before this method could
+ acquire a lock.
+
+ @param cpv: The cpv of the existing package to remove
+ @type cpv: portage.versions._pkg_str
+ @rtype: None
+ @return: None
+ @raise KeyError: If cpv does not exist in the internal state
+ """
+ if not self.populated:
+ self.populate()
+ os.makedirs(self.pkgdir, exist_ok=True)
+ pkgindex_lock = lockfile(self._pkgindex_file, wantnewlockfile=1)
+ try:
+ # Will raise KeyError if the package is not found.
+ instance_key = self.dbapi._instance_key(cpv)
+ pkg_path = self.getname(cpv)
+ self.dbapi.cpv_remove(cpv)
+ self._pkg_paths.pop(instance_key, None)
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(instance_key, None)
+ pkgindex = self._load_pkgindex()
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ path = pkg_path[len(self.pkgdir) + 1 :]
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d = pkgindex.packages[i]
+ if cpv == d.get("CPV"):
+ if path == d.get("PATH", ""):
+ del pkgindex.packages[i]
+
+ self._pkgindex_write(pkgindex)
+ try:
+ os.remove(pkg_path)
+ except OSError as err:
+ writemsg(
+ colorize(
+ "WARN",
+ f"Failed to remove package: {pkg_path} {str(err)}",
+ )
+ )
+ finally:
+ unlockfile(pkgindex_lock)
+
def _read_metadata(self, filename, st, keys=None, binpkg_format=None):
"""
Read metadata from a binary package. The returned metadata
@@ -2198,16 +2278,6 @@ class binarytree:
raise InvalidBinaryPackageFormat(binpkg_format)
def _allocate_filename_multi(self, cpv, remote_binpkg_format=None):
- # First, get the max build_id found when _populate was
- # called.
- max_build_id = self._max_build_id(cpv)
-
- # A new package may have been added concurrently since the
- # last _populate call, so use increment build_id until
- # we locate an unused id.
- pf = catsplit(cpv)[1]
- build_id = max_build_id + 1
-
if remote_binpkg_format is None:
try:
binpkg_format = get_binpkg_format(cpv._metadata["PATH"])
@@ -2225,6 +2295,33 @@ class binarytree:
else:
raise InvalidBinaryPackageFormat(binpkg_format)
+ # If the preferred path is available then return
+ # that. This prevents unnecessary build_id incrementation
+ # triggered when the _max_build_id method counts remote
+ # build ids.
+ pf = catsplit(cpv)[1]
+ if getattr(cpv, "build_id", False):
+ preferred_path = f"{os.path.join(self.pkgdir, cpv.cp, pf)}-{cpv.build_id}.{binpkg_suffix}"
+ if not os.path.exists(preferred_path):
+ try:
+ # Avoid races
+ ensure_dirs(os.path.dirname(preferred_path))
+ with open(preferred_path, "x") as f:
+ pass
+ except FileExistsError:
+ pass
+ else:
+ return (preferred_path, cpv.build_id)
+
+ # First, get the max build_id found when _populate was
+ # called.
+ max_build_id = self._max_build_id(cpv)
+
+ # A new package may have been added concurrently since the
+ # last _populate call, so use increment build_id until
+ # we locate an unused id.
+ build_id = max_build_id + 1
+
while True:
filename = (
f"{os.path.join(self.pkgdir, cpv.cp, pf)}-{build_id}.{binpkg_suffix}"
diff --git a/lib/portage/dbapi/porttree.py b/lib/portage/dbapi/porttree.py
index eabf2d0a2..4eebe1183 100644
--- a/lib/portage/dbapi/porttree.py
+++ b/lib/portage/dbapi/porttree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2021 Gentoo Authors
+# Copyright 1998-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"]
@@ -41,7 +41,9 @@ from portage.util.futures import asyncio
from portage.util.futures.iter_completed import iter_gather
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+import contextlib
import os as _os
+import threading
import traceback
import warnings
import errno
@@ -106,7 +108,6 @@ class _dummy_list(list):
class _better_cache:
-
"""
The purpose of better_cache is to locate catpkgs in repositories using ``os.listdir()`` as much as possible, which
is less expensive IO-wise than exhaustively doing a stat on each repo for a particular catpkg. better_cache stores a
@@ -240,6 +241,7 @@ class portdbapi(dbapi):
# this purpose because doebuild makes many changes to the config
# instance that is passed in.
self.doebuild_settings = config(clone=self.settings)
+ self._doebuild_settings_lock = asyncio.Lock()
self.depcachedir = os.path.realpath(self.settings.depcachedir)
if os.environ.get("SANDBOX_ON") == "1":
@@ -357,6 +359,17 @@ class portdbapi(dbapi):
self._better_cache = None
self._broken_ebuilds = set()
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ # These attributes are not picklable, so they are automatically
+ # regenerated after unpickling.
+ state["_doebuild_settings_lock"] = None
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self._doebuild_settings_lock = asyncio.Lock()
+
def _set_porttrees(self, porttrees):
"""
Consumers, such as emirrordist, may modify the porttrees attribute in
@@ -670,7 +683,7 @@ class portdbapi(dbapi):
self.async_aux_get(mycpv, mylist, mytree=mytree, myrepo=myrepo, loop=loop)
)
- def async_aux_get(self, mycpv, mylist, mytree=None, myrepo=None, loop=None):
+ async def async_aux_get(self, mycpv, mylist, mytree=None, myrepo=None, loop=None):
"""
Asynchronous form form of aux_get.
@@ -695,13 +708,11 @@ class portdbapi(dbapi):
# Callers of this method certainly want the same event loop to
# be used for all calls.
loop = asyncio._wrap_loop(loop)
- future = loop.create_future()
cache_me = False
if myrepo is not None:
mytree = self.treemap.get(myrepo)
if mytree is None:
- future.set_exception(PortageKeyError(myrepo))
- return future
+ raise PortageKeyError(myrepo)
if (
mytree is not None
@@ -720,16 +731,14 @@ class portdbapi(dbapi):
):
aux_cache = self._aux_cache.get(mycpv)
if aux_cache is not None:
- future.set_result([aux_cache.get(x, "") for x in mylist])
- return future
+ return [aux_cache.get(x, "") for x in mylist]
cache_me = True
try:
cat, pkg = mycpv.split("/", 1)
except ValueError:
# Missing slash. Can't find ebuild so raise PortageKeyError.
- future.set_exception(PortageKeyError(mycpv))
- return future
+ raise PortageKeyError(mycpv)
myebuild, mylocation = self.findname2(mycpv, mytree)
@@ -738,12 +747,12 @@ class portdbapi(dbapi):
"!!! aux_get(): %s\n" % _("ebuild not found for '%s'") % mycpv,
noiselevel=1,
)
- future.set_exception(PortageKeyError(mycpv))
- return future
+ raise PortageKeyError(mycpv)
mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
if mydata is not None:
+ future = loop.create_future()
self._aux_get_return(
future,
mycpv,
@@ -755,37 +764,71 @@ class portdbapi(dbapi):
cache_me,
None,
)
- return future
+ return future.result()
if myebuild in self._broken_ebuilds:
- future.set_exception(PortageKeyError(mycpv))
- return future
-
- proc = EbuildMetadataPhase(
- cpv=mycpv,
- ebuild_hash=ebuild_hash,
- portdb=self,
- repo_path=mylocation,
- scheduler=loop,
- settings=self.doebuild_settings,
- )
+ raise PortageKeyError(mycpv)
- proc.addExitListener(
- functools.partial(
- self._aux_get_return,
- future,
- mycpv,
- mylist,
- myebuild,
- ebuild_hash,
- mydata,
- mylocation,
- cache_me,
- )
- )
- future.add_done_callback(functools.partial(self._aux_get_cancel, proc))
- proc.start()
- return future
+ proc = None
+ deallocate_config = None
+ async with contextlib.AsyncExitStack() as stack:
+ try:
+ if (
+ threading.current_thread() is threading.main_thread()
+ and loop is asyncio._safe_loop()
+ ):
+ # In this case use self._doebuild_settings_lock to manage concurrency.
+ deallocate_config = loop.create_future()
+ await stack.enter_async_context(self._doebuild_settings_lock)
+ settings = self.doebuild_settings
+ else:
+ if portage._internal_caller:
+ raise AssertionError(
+ f"async_aux_get called from thread {threading.current_thread()} with loop {loop}"
+ )
+ # Clone a config instance since we do not have a thread-safe config pool.
+ settings = portage.config(clone=self.settings)
+
+ proc = EbuildMetadataPhase(
+ cpv=mycpv,
+ ebuild_hash=ebuild_hash,
+ portdb=self,
+ repo_path=mylocation,
+ scheduler=loop,
+ settings=settings,
+ deallocate_config=deallocate_config,
+ )
+
+ future = loop.create_future()
+ proc.addExitListener(
+ functools.partial(
+ self._aux_get_return,
+ future,
+ mycpv,
+ mylist,
+ myebuild,
+ ebuild_hash,
+ mydata,
+ mylocation,
+ cache_me,
+ )
+ )
+ future.add_done_callback(functools.partial(self._aux_get_cancel, proc))
+ proc.start()
+
+ finally:
+ # Wait for deallocate_config before releasing
+ # self._doebuild_settings_lock if needed.
+ if deallocate_config is not None:
+ if proc is None or not proc.isAlive():
+ deallocate_config.done() or deallocate_config.cancel()
+ else:
+ await deallocate_config
+
+ # After deallocate_config is done, release self._doebuild_settings_lock
+ # by leaving the stack context, and wait for proc to finish and
+ # trigger a call to self._aux_get_return.
+ return await future
@staticmethod
def _aux_get_cancel(proc, future):
@@ -890,7 +933,7 @@ class portdbapi(dbapi):
)
)
else:
- result.set_exception(future.exception())
+ result.set_exception(aux_get_future.exception())
return
eapi, myuris = aux_get_future.result()
@@ -914,8 +957,9 @@ class portdbapi(dbapi):
except Exception as e:
result.set_exception(e)
- aux_get_future = self.async_aux_get(
- mypkg, ["EAPI", "SRC_URI"], mytree=mytree, loop=loop
+ aux_get_future = asyncio.ensure_future(
+ self.async_aux_get(mypkg, ["EAPI", "SRC_URI"], mytree=mytree, loop=loop),
+ loop,
)
result.add_done_callback(
lambda result: aux_get_future.cancel() if result.cancelled() else None
diff --git a/lib/portage/dbapi/vartree.py b/lib/portage/dbapi/vartree.py
index 46ce23b0c..05e16b569 100644
--- a/lib/portage/dbapi/vartree.py
+++ b/lib/portage/dbapi/vartree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2021 Gentoo Authors
+# Copyright 1998-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["vardbapi", "vartree", "dblink"] + ["write_contents", "tar_contents"]
@@ -65,6 +65,7 @@ from portage.const import (
from portage.dbapi import dbapi
from portage.exception import (
CommandNotFound,
+ CorruptionKeyError,
InvalidData,
InvalidLocation,
InvalidPackageName,
@@ -1005,8 +1006,10 @@ class vardbapi(dbapi):
def aux_update(self, cpv, values):
mylink = self._dblink(cpv)
- if not mylink.exists():
- raise KeyError(cpv)
+ try:
+ os.stat(mylink.dbdir)
+ except OSError as oe:
+ raise CorruptionKeyError(cpv) from oe
self._bump_mtime(cpv)
self._clear_pkg_cache(mylink)
for k, v in values.items():
@@ -2176,9 +2179,11 @@ class dblink:
# The tarfile module will write pax headers holding the
# xattrs only if PAX_FORMAT is specified here.
with tarfile.open(
- fileobj=output_file
- if hasattr(output_file, "write")
- else open(output_file.fileno(), mode="wb", closefd=False),
+ fileobj=(
+ output_file
+ if hasattr(output_file, "write")
+ else open(output_file.fileno(), mode="wb", closefd=False)
+ ),
mode="w|",
format=tarfile.PAX_FORMAT if xattrs else tarfile.DEFAULT_FORMAT,
) as tar:
@@ -5311,12 +5316,14 @@ class dblink:
# to TextIOWrapper with python2.
contents_tmp_path = os.path.join(self.dbtmpdir, "CONTENTS")
outfile = atomic_ofstream(
- contents_tmp_path
- if portage.utf8_mode
- else _unicode_encode(
- contents_tmp_path,
- encoding=_encodings["fs"],
- errors="strict",
+ (
+ contents_tmp_path
+ if portage.utf8_mode
+ else _unicode_encode(
+ contents_tmp_path,
+ encoding=_encodings["fs"],
+ errors="strict",
+ )
),
mode="w",
encoding=_encodings["repo.content"],
@@ -6301,7 +6308,19 @@ class dblink:
if not _cmpxattr(src_bytes, dest_bytes, exclude=excluded_xattrs):
return True
- return not filecmp.cmp(src_bytes, dest_bytes, shallow=False)
+ try:
+ files_equal = filecmp.cmp(src_bytes, dest_bytes, shallow=False)
+ except Exception as e:
+ writemsg(
+ _(
+ "Exception '%s' happened when comparing files %s and %s, will replace the latter\n"
+ )
+ % (e, mysrc, mydest),
+ noiselevel=-1,
+ )
+ return True
+
+ return not files_equal
def merge(
@@ -6552,9 +6571,9 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None, xattrs=Fals
# Compatible with GNU tar, which saves the xattrs
# under the SCHILY.xattr namespace.
for k in xattr.list(path_bytes):
- tarinfo.pax_headers[
- "SCHILY.xattr." + _unicode_decode(k)
- ] = _unicode_decode(xattr.get(path_bytes, _unicode_encode(k)))
+ tarinfo.pax_headers["SCHILY.xattr." + _unicode_decode(k)] = (
+ _unicode_decode(xattr.get(path_bytes, _unicode_encode(k)))
+ )
with open(path_bytes, "rb") as f:
tar.addfile(tarinfo, f)
diff --git a/lib/portage/dep/__init__.py b/lib/portage/dep/__init__.py
index a54422153..a4a5bf26b 100644
--- a/lib/portage/dep/__init__.py
+++ b/lib/portage/dep/__init__.py
@@ -57,7 +57,10 @@ from portage.versions import (
ververify,
)
import portage.cache.mappings
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+ import _emerge.Package
# \w is [a-zA-Z0-9_]
@@ -1437,7 +1440,6 @@ class _use_dep:
class Atom(str):
-
"""
For compatibility with existing atom string manipulation code, this
class emulates most of the str methods that are useful with atoms.
@@ -1724,7 +1726,7 @@ class Atom(str):
)
@property
- def slot_operator_built(self):
+ def slot_operator_built(self) -> bool:
"""
Returns True if slot_operator == "=" and sub_slot is not None.
NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
@@ -1733,7 +1735,7 @@ class Atom(str):
return self.slot_operator == "=" and self.sub_slot is not None
@property
- def without_repo(self):
+ def without_repo(self) -> "Atom":
if self.repo is None:
return self
return Atom(
@@ -1741,7 +1743,7 @@ class Atom(str):
)
@property
- def without_slot(self):
+ def without_slot(self) -> "Atom":
if self.slot is None and self.slot_operator is None:
return self
atom = remove_slot(self)
@@ -1751,7 +1753,7 @@ class Atom(str):
atom += str(self.use)
return Atom(atom, allow_repo=True, allow_wildcard=True)
- def with_repo(self, repo):
+ def with_repo(self, repo) -> "Atom":
atom = remove_slot(self)
if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
@@ -1766,7 +1768,7 @@ class Atom(str):
atom += str(self.use)
return Atom(atom, allow_repo=True, allow_wildcard=True)
- def with_slot(self, slot):
+ def with_slot(self, slot) -> "Atom":
atom = remove_slot(self) + _slot_separator + slot
if self.repo is not None:
atom += _repo_separator + self.repo
@@ -1779,7 +1781,7 @@ class Atom(str):
"Atom instances are immutable", self.__class__, name, value
)
- def intersects(self, other):
+ def intersects(self, other: "Atom") -> bool:
"""
Atoms with different cpv, operator or use attributes cause this method
to return False even though there may actually be some intersection.
@@ -1809,7 +1811,7 @@ class Atom(str):
return False
- def evaluate_conditionals(self, use):
+ def evaluate_conditionals(self, use: set) -> "Atom":
"""
Create an atom instance with any USE conditionals evaluated.
@param use: The set of enabled USE flags
@@ -1837,7 +1839,9 @@ class Atom(str):
_use=use_dep,
)
- def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ def violated_conditionals(
+ self, other_use: set, is_valid_flag: callable, parent_use=None
+ ) -> "Atom":
"""
Create an atom instance with any USE conditional removed, that is
satisfied by other_use.
@@ -1900,7 +1904,7 @@ class Atom(str):
memo[id(self)] = self
return self
- def match(self, pkg):
+ def match(self, pkg: "_emerge.Package"):
"""
Check if the given package instance matches this atom.
diff --git a/lib/portage/dep/_slot_operator.py b/lib/portage/dep/_slot_operator.py
index 82dd7d66c..d3f506450 100644
--- a/lib/portage/dep/_slot_operator.py
+++ b/lib/portage/dep/_slot_operator.py
@@ -91,6 +91,7 @@ def _eval_deps(dep_struct, vardbs):
# and B installed should record subslot on A only since the package is
# supposed to link against that anyway, and we have no guarantee that B
# has matching ABI.
+ # See bug #455904, bug #489458, bug #586238.
for i, x in enumerate(dep_struct):
if isinstance(x, list):
diff --git a/lib/portage/dep/dep_check.py b/lib/portage/dep/dep_check.py
index 5ca0995a8..c361ee59e 100644
--- a/lib/portage/dep/dep_check.py
+++ b/lib/portage/dep/dep_check.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["dep_check", "dep_eval", "dep_wordreduce", "dep_zapdeps"]
@@ -963,7 +963,8 @@ def _overlap_dnf(dep_struct):
order to minimize the number of packages chosen to satisfy cases like
"|| ( foo bar ) || ( bar baz )" as in bug #632026. Non-overlapping
groups are excluded from the conversion, since DNF leads to exponential
- explosion of the formula.
+ explosion of the formula. Duplicate || groups are eliminated since
+ DNF expansion of duplicates is nonsensical (bug #891137).
When dep_struct does not contain any overlapping groups, no DNF
conversion will be performed, and dep_struct will be returned as-is.
@@ -1021,7 +1022,17 @@ def _overlap_dnf(dep_struct):
if len(disjunctions) > 1:
overlap = True
# convert overlapping disjunctions to DNF
- result.extend(_dnf_convert(sorted(disjunctions.values(), key=order_key)))
+ dedup_set = set()
+ unique_disjunctions = []
+ for x in sorted(disjunctions.values(), key=order_key):
+ dep_repr = portage.dep.paren_enclose(x, opconvert=True)
+ if dep_repr not in dedup_set:
+ dedup_set.add(dep_repr)
+ unique_disjunctions.append(x)
+ if len(unique_disjunctions) > 1:
+ result.extend(_dnf_convert(unique_disjunctions))
+ else:
+ result.extend(unique_disjunctions)
else:
# pass through non-overlapping disjunctions
result.append(disjunctions.popitem()[1])
diff --git a/lib/portage/dep/libc.py b/lib/portage/dep/libc.py
new file mode 100644
index 000000000..db88432cb
--- /dev/null
+++ b/lib/portage/dep/libc.py
@@ -0,0 +1,83 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.const import LIBC_PACKAGE_ATOM
+from portage.dbapi._expand_new_virt import expand_new_virt
+
+import portage.dbapi.porttree
+
+
+def find_libc_deps(portdb: portage.dbapi.porttree.dbapi, realized: bool = False):
+ """Finds libc package for a ROOT via portdb.
+
+ Parameters
+ ----------
+ portdb : dbapi
+ dbapi instance for portdb (for installed packages).
+ realized : bool
+ Request installed atoms rather than the installed package satisfying LIBC_PACKAGE_ATOM.
+
+ Returns
+ -------
+ list
+ List of libc packages (or atoms if realized is passed).
+ """
+
+ libc_pkgs = set()
+
+ for atom in expand_new_virt(
+ portdb,
+ LIBC_PACKAGE_ATOM,
+ ):
+ if atom.blocker:
+ continue
+
+ if not realized:
+ # Just the raw packages were requested (whatever satisfies the virtual)
+ libc_pkgs.add(atom)
+ continue
+
+ # This will give us something like sys-libs/glibc:2.2, but we want to know
+ # what installed atom actually satisfies that.
+ try:
+ libc_pkgs.add(portdb.match(atom)[0])
+ except IndexError:
+ continue
+
+ return libc_pkgs
+
+
+def strip_libc_deps(dep_struct: list, libc_deps: set):
+ """Strip libc dependency out of a given dependency structure.
+
+ Parameters
+ ----------
+ dep_struct: list
+ List of package dependencies (atoms).
+
+ libc_deps: set
+ List of dependencies satisfying LIBC_PACKAGE_ATOM to be
+ stripped out of any dependencies.
+
+ Returns
+ -------
+ list
+ List of dependencies with any matching libc_deps removed.
+ """
+ # We're going to just grab the libc provider for ROOT and
+ # strip out any dep for the purposes of --changed-deps.
+ # We can't go off versions, even though it'd be more precise
+ # (see below), because we'd end up with FPs and unnecessary
+ # --changed-deps results far too often.
+ #
+ # This penalizes a bit the case where someone adds a
+ # minimum (or maximum) version of libc explicitly in an ebuild
+ # without a new revision, but that's extremely rare, and doesn't
+ # feel like it changes the balance for what we prefer here.
+
+ for i, x in reversed(list(enumerate(dep_struct))):
+ # We only need to bother if x is an Atom because we know the deps
+ # we inject are simple & flat.
+ if isinstance(x, Atom) and any(x.cp == libc_dep.cp for libc_dep in libc_deps):
+ del dep_struct[i]
diff --git a/lib/portage/dep/meson.build b/lib/portage/dep/meson.build
index ea1e8cad6..d2379d8cb 100644
--- a/lib/portage/dep/meson.build
+++ b/lib/portage/dep/meson.build
@@ -1,6 +1,7 @@
py.install_sources(
[
'dep_check.py',
+ 'libc.py',
'_dnf.py',
'_slot_operator.py',
'__init__.py',
diff --git a/lib/portage/dep/soname/multilib_category.py b/lib/portage/dep/soname/multilib_category.py
index 14a9eea77..baca439fd 100644
--- a/lib/portage/dep/soname/multilib_category.py
+++ b/lib/portage/dep/soname/multilib_category.py
@@ -52,6 +52,11 @@ from portage.util.elf.constants import (
EM_AARCH64,
EM_ALPHA,
EM_AMDGPU,
+ EM_ARC,
+ EM_ARC_COMPACT,
+ EM_ARC_COMPACT2,
+ EM_ARC_COMPACT3,
+ EM_ARC_COMPACT3_64,
EM_ARM,
EM_ALTERA_NIOS2,
EM_IA_64,
@@ -80,6 +85,11 @@ _machine_prefix_map = {
EM_ALPHA: "alpha",
EM_AMDGPU: "amdgpu",
EM_ALTERA_NIOS2: "nios2",
+ EM_ARC: "arc",
+ EM_ARC_COMPACT: "arc",
+ EM_ARC_COMPACT2: "arc",
+ EM_ARC_COMPACT3: "arc",
+ EM_ARC_COMPACT3_64: "arc",
EM_ARM: "arm",
EM_IA_64: "ia64",
EM_LOONGARCH: "loong",
diff --git a/lib/portage/emaint/main.py b/lib/portage/emaint/main.py
index 0c620a1a8..ad6eea359 100644
--- a/lib/portage/emaint/main.py
+++ b/lib/portage/emaint/main.py
@@ -62,7 +62,7 @@ class OptionItem:
def usage(module_controller):
- _usage = "usage: emaint [options] COMMAND"
+ _usage = "emaint [options] COMMAND"
desc = (
"The emaint program provides an interface to system health "
diff --git a/lib/portage/emaint/modules/merges/merges.py b/lib/portage/emaint/modules/merges/merges.py
index 7c16b3e6c..dec97f83e 100644
--- a/lib/portage/emaint/modules/merges/merges.py
+++ b/lib/portage/emaint/modules/merges/merges.py
@@ -22,7 +22,9 @@ class TrackingFile:
@param tracking_path: file path used to keep track of failed merges
@type tracking_path: String
"""
- self._tracking_path = _unicode_encode(tracking_path)
+ self._tracking_path = (
+ tracking_path if portage.utf8_mode else _unicode_encode(tracking_path)
+ )
def save(self, failed_pkgs):
"""
diff --git a/lib/portage/exception.py b/lib/portage/exception.py
index 505e920de..7b48aa919 100644
--- a/lib/portage/exception.py
+++ b/lib/portage/exception.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2020 Gentoo Authors
+# Copyright 1998-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import signal
@@ -30,6 +30,10 @@ class CorruptionError(PortageException):
"""Corruption indication"""
+class CorruptionKeyError(CorruptionError, PortageKeyError):
+ """KeyError raised when corruption is detected (cause should be accessible as __cause__)"""
+
+
class InvalidDependString(PortageException):
"""An invalid depend string has been encountered"""
@@ -197,6 +201,10 @@ class CompressorOperationFailed(PortagePackageException):
"""An error occurred during external operation"""
+class SignedPackage(PortagePackageException):
+ """Unable to update a signed package"""
+
+
class InvalidAtom(PortagePackageException):
"""Malformed atom spec"""
diff --git a/lib/portage/gpg.py b/lib/portage/gpg.py
index 306787224..d8a4cfcfc 100644
--- a/lib/portage/gpg.py
+++ b/lib/portage/gpg.py
@@ -1,10 +1,9 @@
-# Copyright 2001-2020 Gentoo Authors
+# Copyright 2001-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import subprocess
import sys
import threading
-import time
from portage import os
from portage.const import SUPPORTED_GENTOO_BINPKG_FORMATS
@@ -24,6 +23,7 @@ class GPG:
"""
self.settings = settings
self.thread = None
+ self._terminated = None
self.GPG_signing_base_command = self.settings.get(
"BINPKG_GPG_SIGNING_BASE_COMMAND"
)
@@ -73,6 +73,7 @@ class GPG:
self.GPG_unlock_command = shlex_split(
varexpand(self.GPG_unlock_command, mydict=self.settings)
)
+ self._terminated = threading.Event()
self.thread = threading.Thread(target=self.gpg_keepalive, daemon=True)
self.thread.start()
@@ -81,16 +82,17 @@ class GPG:
Stop keepalive thread.
"""
if self.thread is not None:
- self.keepalive = False
+ self._terminated.set()
def gpg_keepalive(self):
"""
Call GPG unlock command every 5 mins to avoid the passphrase expired.
"""
count = 0
- while self.keepalive:
+ while not self._terminated.is_set():
if count < 5:
- time.sleep(60)
+ if self._terminated.wait(60):
+ break
count += 1
continue
else:
@@ -102,5 +104,5 @@ class GPG:
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
- if proc.wait() != os.EX_OK:
+ if proc.wait() != os.EX_OK and not self._terminated.is_set():
raise GPGException("GPG keepalive failed")
diff --git a/lib/portage/gpkg.py b/lib/portage/gpkg.py
index c56076ab9..edb0e43fb 100644
--- a/lib/portage/gpkg.py
+++ b/lib/portage/gpkg.py
@@ -1,7 +1,8 @@
-# Copyright 2001-2020 Gentoo Authors
+# Copyright 2001-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import tarfile
+import traceback
import io
import threading
import subprocess
@@ -34,6 +35,7 @@ from portage.exception import (
DigestException,
MissingSignature,
InvalidSignature,
+ SignedPackage,
)
from portage.output import colorize, EOutput
from portage.util._urlopen import urlopen
@@ -150,7 +152,10 @@ class tar_stream_writer:
if self.proc is not None:
self.killed = True
self.proc.kill()
- self.proc.stdin.close()
+ try:
+ self.proc.stdin.close()
+ except BrokenPipeError:
+ traceback.print_exc()
self.close()
def _cmd_read_thread(self):
@@ -212,7 +217,7 @@ class tar_stream_writer:
if self.proc is not None:
self.proc.stdin.close()
if self.proc.wait() != os.EX_OK:
- if not self.error:
+ if not (self.killed or self.error):
raise CompressorOperationFailed("compression failed")
if self.read_thread.is_alive():
self.read_thread.join()
@@ -348,7 +353,10 @@ class tar_stream_reader:
if self.proc is not None:
self.killed = True
self.proc.kill()
- self.proc.stdin.close()
+ try:
+ self.proc.stdin.close()
+ except BrokenPipeError:
+ traceback.print_exc()
self.close()
def read(self, bufsize=-1):
@@ -985,22 +993,30 @@ class gpkg:
try:
image_safe = tar_safe_extract(image, "image")
image_safe.extractall(decompress_dir)
+ image_tar.close()
except Exception as ex:
writemsg(colorize("BAD", "!!!Extract failed."))
raise
finally:
- image_tar.kill()
+ if not image_tar.closed:
+ image_tar.kill()
- def update_metadata(self, metadata, new_basename=None):
+ def update_metadata(self, metadata, new_basename=None, force=False):
"""
Update metadata in the gpkg file.
"""
self._verify_binpkg()
self.checksums = []
- old_basename = self.prefix
+ if self.signature_exist and not force:
+ raise SignedPackage("Cannot update a signed gpkg file")
if new_basename is None:
- new_basename = old_basename
+ if self.basename:
+ new_basename = self.basename
+ elif self.prefix:
+ new_basename = self.prefix
+ else:
+ raise InvalidBinaryPackageFormat("No basename or prefix specified")
else:
new_basename = new_basename.split("/", maxsplit=1)[-1]
self.basename = new_basename
@@ -2095,7 +2111,7 @@ class gpkg:
if self.basename and self.prefix and not self.prefix.startswith(self.basename):
writemsg(
- colorize("WARN", f"Package basename mismatched, using {self.prefix}")
+ colorize("WARN", f"Package basename mismatched, using {self.prefix}\n")
)
all_files = tar.getmembers()
diff --git a/lib/portage/output.py b/lib/portage/output.py
index cdeeb18e9..7d3a6278f 100644
--- a/lib/portage/output.py
+++ b/lib/portage/output.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2021 Gentoo Authors
+# Copyright 1998-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__docformat__ = "epytext"
@@ -13,7 +13,9 @@ import portage
portage.proxy.lazyimport.lazyimport(
globals(),
+ "portage.process:spawn",
"portage.util:writemsg",
+ "portage.util.futures:asyncio",
)
import portage.util.formatter as formatter
@@ -557,13 +559,20 @@ def set_term_size(lines, columns, fd):
Set the number of lines and columns for the tty that is connected to fd.
For portability, this simply calls `stty rows $lines columns $columns`.
"""
- from portage.process import spawn
cmd = ["stty", "rows", str(lines), "columns", str(columns)]
try:
- spawn(cmd, env=os.environ, fd_pipes={0: fd})
+ proc = spawn(cmd, env=os.environ, fd_pipes={0: fd}, returnproc=True)
except CommandNotFound:
writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
+ else:
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ asyncio.ensure_future(proc.wait(), loop).add_done_callback(
+ lambda future: future.result()
+ )
+ else:
+ loop.run_until_complete(proc.wait())
class EOutput:
diff --git a/lib/portage/package/ebuild/config.py b/lib/portage/package/ebuild/config.py
index 55018edbd..72670cbe1 100644
--- a/lib/portage/package/ebuild/config.py
+++ b/lib/portage/package/ebuild/config.py
@@ -29,7 +29,6 @@ portage.proxy.lazyimport.lazyimport(
"portage.dbapi.vartree:vartree",
"portage.package.ebuild.doebuild:_phase_func_map",
"portage.util.compression_probe:_compressors",
- "portage.util.locale:check_locale,split_LC_ALL",
)
from portage import bsd_chflags, load_mod, os, selinux, _unicode_decode
from portage.const import (
@@ -868,10 +867,10 @@ class config:
# Initialize all USE related variables we track ourselves.
self.usemask = self._use_manager.getUseMask()
self.useforce = self._use_manager.getUseForce()
- self.configdict["conf"][
- "USE"
- ] = self._use_manager.extract_global_USE_changes(
- self.configdict["conf"].get("USE", "")
+ self.configdict["conf"]["USE"] = (
+ self._use_manager.extract_global_USE_changes(
+ self.configdict["conf"].get("USE", "")
+ )
)
# Read license_groups and optionally license_groups and package.license from user config
@@ -881,10 +880,10 @@ class config:
user_config=local_config,
)
# Extract '*/*' entries from package.license
- self.configdict["conf"][
- "ACCEPT_LICENSE"
- ] = self._license_manager.extract_global_changes(
- self.configdict["conf"].get("ACCEPT_LICENSE", "")
+ self.configdict["conf"]["ACCEPT_LICENSE"] = (
+ self._license_manager.extract_global_changes(
+ self.configdict["conf"].get("ACCEPT_LICENSE", "")
+ )
)
# profile.bashrc
@@ -1070,9 +1069,9 @@ class config:
# reasonable defaults; this is important as without USE_ORDER,
# USE will always be "" (nothing set)!
if "USE_ORDER" not in self:
- self[
- "USE_ORDER"
- ] = "env:pkg:conf:defaults:pkginternal:features:repo:env.d"
+ self["USE_ORDER"] = (
+ "env:pkg:conf:defaults:pkginternal:features:repo:env.d"
+ )
self.backup_changes("USE_ORDER")
if "CBUILD" not in self and "CHOST" in self:
@@ -1698,14 +1697,14 @@ class config:
if use is None:
use = frozenset(settings["PORTAGE_USE"].split())
- values[
- "ACCEPT_LICENSE"
- ] = settings._license_manager.get_prunned_accept_license(
- settings.mycpv,
- use,
- settings.get("LICENSE", ""),
- settings.get("SLOT"),
- settings.get("PORTAGE_REPO_NAME"),
+ values["ACCEPT_LICENSE"] = (
+ settings._license_manager.get_prunned_accept_license(
+ settings.mycpv,
+ use,
+ settings.get("LICENSE", ""),
+ settings.get("SLOT"),
+ settings.get("PORTAGE_REPO_NAME"),
+ )
)
values["PORTAGE_PROPERTIES"] = self._flatten("PROPERTIES", use, settings)
values["PORTAGE_RESTRICT"] = self._flatten("RESTRICT", use, settings)
@@ -2123,6 +2122,9 @@ class config:
"test" in restrict
and not "all" in allow_test
and not ("test_network" in properties and "network" in allow_test)
+ and not (
+ "test_privileged" in properties and "privileged" in allow_test
+ )
)
if restrict_test and "test" in self.features:
@@ -3377,20 +3379,17 @@ class config:
mydict["EBUILD_PHASE_FUNC"] = phase_func
if eapi_attrs.posixish_locale:
- split_LC_ALL(mydict)
- mydict["LC_COLLATE"] = "C"
- # check_locale() returns None when check can not be executed.
- if check_locale(silent=True, env=mydict) is False:
- # try another locale
- for l in ("C.UTF-8", "en_US.UTF-8", "en_GB.UTF-8", "C"):
- mydict["LC_CTYPE"] = l
- if check_locale(silent=True, env=mydict):
- # TODO: output the following only once
- # writemsg(_("!!! LC_CTYPE unsupported, using %s instead\n")
- # % mydict["LC_CTYPE"])
- break
- else:
- raise AssertionError("C locale did not pass the test!")
+ if mydict.get("LC_ALL"):
+ # Sometimes this method is called for processes
+ # that are not ebuild phases, so only raise
+ # AssertionError for actual ebuild phases.
+ if phase and phase not in ("clean", "cleanrm", "fetch"):
+ raise AssertionError(
+ f"LC_ALL={mydict['LC_ALL']} for posixish locale. It seems that split_LC_ALL was not called for phase {phase}?"
+ )
+ elif "LC_ALL" in mydict:
+ # Delete placeholder from split_LC_ALL.
+ del mydict["LC_ALL"]
if not eapi_attrs.exports_PORTDIR:
mydict.pop("PORTDIR", None)
diff --git a/lib/portage/package/ebuild/doebuild.py b/lib/portage/package/ebuild/doebuild.py
index c627077a2..7994394bd 100644
--- a/lib/portage/package/ebuild/doebuild.py
+++ b/lib/portage/package/ebuild/doebuild.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2023 Gentoo Authors
+# Copyright 2010-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["doebuild", "doebuild_environment", "spawn", "spawnebuild"]
@@ -19,6 +19,7 @@ import sys
import tempfile
from textwrap import wrap
import time
+from typing import Union
import warnings
import zlib
# PREFIX LOCAL
@@ -82,6 +83,7 @@ from portage.dep import (
paren_enclose,
use_reduce,
)
+from portage.dep.libc import find_libc_deps
from portage.eapi import (
eapi_exports_KV,
eapi_exports_merge_type,
@@ -124,7 +126,7 @@ from portage.util.futures.executor.fork import ForkExecutor
from portage.util.path import first_existing
from portage.util.socks5 import get_socks5_proxy
from portage.util._dyn_libs.dyn_libs import check_dyn_libs_inconsistent
-from portage.versions import _pkgsplit
+from portage.versions import _pkgsplit, pkgcmp
from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
@@ -243,6 +245,9 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
ebuild_sh_arg,
)
+ if phase == "test" and "test_privileged" in settings["PORTAGE_PROPERTIES"].split():
+ kwargs["droppriv"] = False
+
settings["EBUILD_PHASE"] = phase
try:
return spawn(cmd, settings, **kwargs)
@@ -251,14 +256,21 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
def _spawn_phase(
- phase, settings, actionmap=None, returnpid=False, logfile=None, **kwargs
+ phase,
+ settings,
+ actionmap=None,
+ returnpid=False,
+ returnproc=False,
+ logfile=None,
+ **kwargs,
):
- if returnpid:
+ if returnproc or returnpid:
return _doebuild_spawn(
phase,
settings,
actionmap=actionmap,
returnpid=returnpid,
+ returnproc=returnproc,
logfile=logfile,
**kwargs,
)
@@ -601,9 +613,9 @@ def doebuild_environment(
if nproc:
mysettings["MAKEOPTS"] = "-j%d" % (nproc)
if "GNUMAKEFLAGS" not in mysettings and "MAKEFLAGS" not in mysettings:
- mysettings[
- "GNUMAKEFLAGS"
- ] = f"--load-average {nproc} --output-sync=line"
+ mysettings["GNUMAKEFLAGS"] = (
+ f"--load-average {nproc} --output-sync=line"
+ )
if not eapi_exports_KV(eapi):
# Discard KV for EAPIs that don't support it. Cached KV is restored
@@ -735,7 +747,8 @@ def doebuild(
prev_mtimes=None,
fd_pipes=None,
returnpid=False,
-):
+ returnproc=False,
+) -> Union[int, portage.process.MultiprocessingProcess, list[int]]:
"""
Wrapper function that invokes specific ebuild phases through the spawning
of ebuild.sh
@@ -772,9 +785,15 @@ def doebuild(
for example.
@type fd_pipes: Dictionary
@param returnpid: Return a list of process IDs for a successful spawn, or
- an integer value if spawn is unsuccessful. NOTE: This requires the
- caller clean up all returned PIDs.
+ an integer value if spawn is unsuccessful. This parameter is
+ supported only when mydo is "depend". NOTE: This requires the caller clean
+ up all returned PIDs.
@type returnpid: Boolean
+ @param returnproc: Return a MultiprocessingProcess instance for a successful spawn, or
+ an integer value if spawn is unsuccessful. This parameter is supported
+ supported only when mydo is "depend". NOTE: This requires the caller to
+ asynchronously wait for the MultiprocessingProcess instance.
+ @type returnproc: Boolean
@rtype: Boolean
@return:
1. 0 for success
@@ -877,17 +896,25 @@ def doebuild(
writemsg("\n", noiselevel=-1)
return 1
- if returnpid and mydo != "depend":
+ if (returnproc or returnpid) and mydo != "depend":
# This case is not supported, since it bypasses the EbuildPhase class
# which implements important functionality (including post phase hooks
# and IPC for things like best/has_version and die).
+ if returnproc:
+ raise NotImplementedError(f"returnproc not implemented for phase {mydo}")
warnings.warn(
"portage.doebuild() called "
"with returnpid parameter enabled. This usage will "
"not be supported in the future.",
- DeprecationWarning,
+ UserWarning,
stacklevel=2,
)
+ elif returnpid:
+ warnings.warn(
+ "The portage.doebuild() returnpid parameter is deprecated and replaced by returnproc",
+ UserWarning,
+ stacklevel=1,
+ )
if mydo == "fetchall":
fetchall = 1
@@ -1037,10 +1064,14 @@ def doebuild(
# get possible slot information from the deps file
if mydo == "depend":
- if not returnpid:
- raise TypeError("returnpid must be True for depend phase")
+ if not (returnproc or returnpid):
+ raise TypeError("returnproc or returnpid must be True for depend phase")
return _spawn_phase(
- mydo, mysettings, fd_pipes=fd_pipes, returnpid=returnpid
+ mydo,
+ mysettings,
+ fd_pipes=fd_pipes,
+ returnpid=returnpid,
+ returnproc=returnproc,
)
if mydo == "nofetch":
@@ -1317,32 +1348,20 @@ def doebuild(
dist_digests = mf.getTypeDigests("DIST")
loop = asyncio._safe_loop()
- if loop.is_running():
- # Called by EbuildFetchonly for emerge --pretend --fetchonly.
- success = fetch(
+ success = loop.run_until_complete(
+ loop.run_in_executor(
+ ForkExecutor(loop=loop),
+ _fetch_subprocess,
fetchme,
mysettings,
- listonly=listonly,
- fetchonly=fetchonly,
- allow_missing_digests=False,
- digests=dist_digests,
- )
- else:
- success = loop.run_until_complete(
- loop.run_in_executor(
- ForkExecutor(loop=loop),
- _fetch_subprocess,
- fetchme,
- mysettings,
- listonly,
- dist_digests,
- fetchonly,
- )
+ listonly,
+ dist_digests,
+ fetchonly,
)
+ )
if not success:
# Since listonly mode is called by emerge --pretend in an
- # asynchronous context, spawn_nofetch would trigger event loop
- # recursion here, therefore delegate execution of pkg_nofetch
+ # asynchronous context, execution of pkg_nofetch is delegated
# to the caller (bug 657360).
if not listonly:
spawn_nofetch(
@@ -2189,7 +2208,7 @@ def spawn(
mysettings.configdict["env"]["LOGNAME"] = logname
try:
- if keywords.get("returnpid"):
+ if keywords.get("returnpid") or keywords.get("returnproc"):
return spawn_func(mystring, env=mysettings.environ(), **keywords)
proc = EbuildSpawnProcess(
@@ -2369,11 +2388,11 @@ def _check_build_log(mysettings, out=None):
f = gzip.GzipFile(filename="", mode="rb", fileobj=f)
am_maintainer_mode = []
- bash_command_not_found = []
+ command_not_found = []
bash_command_not_found_re = re.compile(
r"(.*): line (\d*): (.*): command not found$"
)
- command_not_found_exclude_re = re.compile(r"/configure: line ")
+ dash_command_not_found_re = re.compile(r"(.*): (\d+): (.*): not found$")
helper_missing_file = []
helper_missing_file_re = re.compile(r"^!!! (do|new).*: .* does not exist$")
@@ -2477,11 +2496,11 @@ def _check_build_log(mysettings, out=None):
):
am_maintainer_mode.append(line.rstrip("\n"))
- if (
- bash_command_not_found_re.match(line) is not None
- and command_not_found_exclude_re.search(line) is None
- ):
- bash_command_not_found.append(line.rstrip("\n"))
+ if bash_command_not_found_re.match(line) is not None:
+ command_not_found.append(line.rstrip("\n"))
+
+ if dash_command_not_found_re.match(line) is not None:
+ command_not_found.append(line.rstrip("\n"))
if helper_missing_file_re.match(line) is not None:
helper_missing_file.append(line.rstrip("\n"))
@@ -2543,10 +2562,10 @@ def _check_build_log(mysettings, out=None):
)
_eqawarn(msg)
- if bash_command_not_found:
+ if command_not_found:
msg = [_("QA Notice: command not found:")]
msg.append("")
- msg.extend("\t" + line for line in bash_command_not_found)
+ msg.extend("\t" + line for line in command_not_found)
_eqawarn(msg)
if helper_missing_file:
@@ -2587,8 +2606,8 @@ def _post_src_install_write_metadata(settings):
"""
eapi_attrs = _get_eapi_attrs(settings.configdict["pkg"]["EAPI"])
-
build_info_dir = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+ metadata_buffer = {}
metadata_keys = ["IUSE"]
if eapi_attrs.iuse_effective:
@@ -2597,12 +2616,12 @@ def _post_src_install_write_metadata(settings):
for k in metadata_keys:
v = settings.configdict["pkg"].get(k)
if v is not None:
- write_atomic(os.path.join(build_info_dir, k), v + "\n")
+ metadata_buffer[k] = v
for k in ("CHOST",):
v = settings.get(k)
if v is not None:
- write_atomic(os.path.join(build_info_dir, k), v + "\n")
+ metadata_buffer[k] = v
with open(
_unicode_encode(
@@ -2642,17 +2661,7 @@ def _post_src_install_write_metadata(settings):
except OSError:
pass
continue
- with open(
- _unicode_encode(
- os.path.join(build_info_dir, k),
- encoding=_encodings["fs"],
- errors="strict",
- ),
- mode="w",
- encoding=_encodings["repo.content"],
- errors="strict",
- ) as f:
- f.write(f"{v}\n")
+ metadata_buffer[k] = v
if eapi_attrs.slot_operator:
deps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())
@@ -2664,17 +2673,20 @@ def _post_src_install_write_metadata(settings):
except OSError:
pass
continue
- with open(
- _unicode_encode(
- os.path.join(build_info_dir, k),
- encoding=_encodings["fs"],
- errors="strict",
- ),
- mode="w",
- encoding=_encodings["repo.content"],
+
+ metadata_buffer[k] = v
+
+ for k, v in metadata_buffer.items():
+ with open(
+ _unicode_encode(
+ os.path.join(build_info_dir, k),
+ encoding=_encodings["fs"],
errors="strict",
- ) as f:
- f.write(f"{v}\n")
+ ),
+ mode="w",
+ encoding=_encodings["repo.content"],
+ ) as f:
+ f.write(f"{v}\n")
def _preinst_bsdflags(mysettings):
@@ -2879,10 +2891,12 @@ def _post_src_install_uid_fix(mysettings, out):
# a normal write might fail due to file permission
# settings on some operating systems such as HP-UX
write_atomic(
- fpath
- if portage.utf8_mode
- else _unicode_encode(
- fpath, encoding=_encodings["merge"], errors="strict"
+ (
+ fpath
+ if portage.utf8_mode
+ else _unicode_encode(
+ fpath, encoding=_encodings["merge"], errors="strict"
+ )
),
new_contents,
mode="wb",
@@ -2956,6 +2970,48 @@ def _reapply_bsdflags_to_image(mysettings):
)
+def _inject_libc_dep(build_info_dir, mysettings):
+ #
+ # We could skip this for non-binpkgs but there doesn't seem to be much
+ # value in that, as users shouldn't downgrade libc anyway.
+ injected_libc_depstring = []
+ for libc_realized_atom in find_libc_deps(
+ QueryCommand.get_db()[mysettings["EROOT"]]["vartree"].dbapi, True
+ ):
+ if pkgcmp(mysettings.mycpv, libc_realized_atom) is not None:
+ # We don't want to inject deps on ourselves (libc)
+ injected_libc_depstring = []
+ break
+
+ injected_libc_depstring.append(f">={libc_realized_atom}")
+
+ rdepend_file = os.path.join(build_info_dir, "RDEPEND")
+ # Slurp the existing contents because we need to mangle it a bit
+ # It'll look something like (if it exists):
+ # ```
+ # app-misc/foo dev-libs/bar
+ # <newline>
+ # ```
+ rdepend = None
+ if os.path.exists(rdepend_file):
+ with open(rdepend_file, encoding="utf-8") as f:
+ rdepend = f.readlines()
+ rdepend = "\n".join(rdepend).strip()
+
+ # For RDEPEND, we want an implicit dependency on >=${PROVIDER_OF_LIBC}
+ # to avoid runtime breakage when merging binpkgs, see bug #753500.
+ #
+ if injected_libc_depstring:
+ if rdepend:
+ rdepend += f" {' '.join(injected_libc_depstring).strip()}"
+ else:
+ # The package doesn't have an RDEPEND, so make one up.
+ rdepend = " ".join(injected_libc_depstring)
+
+ with open(rdepend_file, "w", encoding="utf-8") as f:
+ f.write(f"{rdepend}\n")
+
+
def _post_src_install_soname_symlinks(mysettings, out):
"""
Check that libraries in $D have corresponding soname symlinks.
@@ -2965,9 +3021,8 @@ def _post_src_install_soname_symlinks(mysettings, out):
"""
image_dir = mysettings["D"]
- needed_filename = os.path.join(
- mysettings["PORTAGE_BUILDDIR"], "build-info", "NEEDED.ELF.2"
- )
+ build_info_dir = os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info")
+ needed_filename = os.path.join(build_info_dir, "NEEDED.ELF.2")
f = None
try:
@@ -2987,6 +3042,11 @@ def _post_src_install_soname_symlinks(mysettings, out):
if f is not None:
f.close()
+ # We do RDEPEND mangling here instead of the natural location
+ # in _post_src_install_write_metadata because NEEDED hasn't been
+ # written yet at that point.
+ _inject_libc_dep(build_info_dir, mysettings)
+
metadata = {}
for k in ("QA_PREBUILT", "QA_SONAME_NO_SYMLINK"):
try:
diff --git a/lib/portage/package/ebuild/fetch.py b/lib/portage/package/ebuild/fetch.py
index d67d3115f..ca15a6c7d 100644
--- a/lib/portage/package/ebuild/fetch.py
+++ b/lib/portage/package/ebuild/fetch.py
@@ -1565,6 +1565,7 @@ def fetch(
tried_locations.add(loc)
if listonly:
writemsg_stdout(loc + " ", noiselevel=-1)
+ fetched = 2
continue
# allow different fetchcommands per protocol
protocol = loc[0 : loc.find("://")]
diff --git a/lib/portage/process.py b/lib/portage/process.py
index de1c89047..ead11e318 100644
--- a/lib/portage/process.py
+++ b/lib/portage/process.py
@@ -1,5 +1,5 @@
# portage.py -- core Portage functionality
-# Copyright 1998-2023 Gentoo Authors
+# Copyright 1998-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
@@ -15,9 +15,11 @@ import subprocess
import sys
import traceback
import os as _os
+import warnings
from dataclasses import dataclass
-from functools import lru_cache
+from functools import lru_cache, partial
+from typing import Any, Optional, Callable, Union
from portage import os
from portage import _encodings
@@ -26,6 +28,9 @@ import portage
portage.proxy.lazyimport.lazyimport(
globals(),
+ "portage.util._async.ForkProcess:ForkProcess",
+ "portage.util._eventloop.global_event_loop:global_event_loop",
+ "portage.util.futures:asyncio",
"portage.util:dump_traceback,writemsg,writemsg_level",
)
@@ -296,12 +301,180 @@ def calc_env_stats(env) -> EnvStats:
env_too_large_warnings = 0
+class AbstractProcess:
+ def send_signal(self, sig):
+ """Send a signal to the process."""
+ if self.returncode is not None:
+ # Skip signalling a process that we know has already died.
+ return
+
+ try:
+ os.kill(self.pid, sig)
+ except ProcessLookupError:
+ # Suppress the race condition error; bpo-40550.
+ pass
+
+
+class Process(AbstractProcess):
+ """
+ An object that wraps OS processes which do not have an
+ associated multiprocessing.Process instance. Ultimately,
+ we need to stop using os.fork() to create these processes
+ because it is unsafe for threaded processes as discussed
+ in https://github.com/python/cpython/issues/84559.
+
+ Note that if subprocess.Popen is used without pass_fds
+ or preexec_fn parameters, then it avoids using os.fork()
+ by instead using posix_spawn. This approach is not used
+ by spawn because it needs to execute python code prior
+ to exec, so it instead uses multiprocessing.Process,
+ which only uses os.fork() when the multiprocessing start
+ method is fork.
+ """
+
+ def __init__(self, pid: int):
+ self.pid = pid
+ self.returncode = None
+ self._exit_waiters = []
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.pid}>"
+
+ async def wait(self):
+ """
+ Wait for the child process to terminate.
+
+ Set and return the returncode attribute.
+ """
+ if self.returncode is not None:
+ return self.returncode
+
+ loop = global_event_loop()
+ if not self._exit_waiters:
+ loop._asyncio_child_watcher.add_child_handler(self.pid, self._child_handler)
+ waiter = loop.create_future()
+ self._exit_waiters.append(waiter)
+ return await waiter
+
+ def _child_handler(self, pid, returncode):
+ if pid != self.pid:
+ raise AssertionError(f"expected pid {self.pid}, got {pid}")
+ self.returncode = returncode
+
+ for waiter in self._exit_waiters:
+ if not waiter.cancelled():
+ waiter.set_result(returncode)
+ self._exit_waiters = None
+
+ def terminate(self):
+ """Terminate the process with SIGTERM"""
+ self.send_signal(signal.SIGTERM)
+
+ def kill(self):
+ """Kill the process with SIGKILL"""
+ self.send_signal(signal.SIGKILL)
+
+
+class MultiprocessingProcess(AbstractProcess):
+ """
+ An object that wraps OS processes created by multiprocessing.Process.
+ """
+
+ # Number of seconds between poll attempts for process exit status
+ # (after the sentinel has become ready).
+ _proc_join_interval = 0.1
+
+ def __init__(self, proc: multiprocessing.Process):
+ self._proc = proc
+ self.pid = proc.pid
+ self.returncode = None
+ self._exit_waiters = []
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.pid}>"
+
+ async def wait(self):
+ """
+ Wait for the child process to terminate.
+
+ Set and return the returncode attribute.
+ """
+ if self.returncode is not None:
+ return self.returncode
+
+ loop = global_event_loop()
+ if not self._exit_waiters:
+ asyncio.ensure_future(self._proc_join(), loop=loop).add_done_callback(
+ self._proc_join_done
+ )
+ waiter = loop.create_future()
+ self._exit_waiters.append(waiter)
+ return await waiter
+
+ async def _proc_join(self):
+ loop = global_event_loop()
+ sentinel_reader = loop.create_future()
+ proc = self._proc
+ loop.add_reader(
+ proc.sentinel,
+ lambda: sentinel_reader.done() or sentinel_reader.set_result(None),
+ )
+ try:
+ await sentinel_reader
+ finally:
+ # If multiprocessing.Process supports the close method, then
+ # access to proc.sentinel will raise ValueError if the
+ # sentinel has been closed. In this case it's not safe to call
+ # remove_reader, since the file descriptor may have been closed
+ # and then reallocated to a concurrent coroutine. When the
+ # close method is not supported, proc.sentinel remains open
+ # until proc's finalizer is called.
+ try:
+ loop.remove_reader(proc.sentinel)
+ except ValueError:
+ pass
+
+ # Now that proc.sentinel is ready, poll until process exit
+ # status has become available.
+ while True:
+ proc.join(0)
+ if proc.exitcode is not None:
+ break
+ await asyncio.sleep(self._proc_join_interval, loop=loop)
+
+ def _proc_join_done(self, future):
+ # The join task should never be cancelled, so let it raise
+ # asyncio.CancelledError here if that somehow happens.
+ future.result()
+
+ self.returncode = self._proc.exitcode
+ if hasattr(self._proc, "close"):
+ self._proc.close()
+ self._proc = None
+
+ for waiter in self._exit_waiters:
+ if not waiter.cancelled():
+ waiter.set_result(self.returncode)
+ self._exit_waiters = None
+
+ def terminate(self):
+ """Terminate the process with SIGTERM"""
+ if self._proc is not None:
+ self._proc.terminate()
+
+ def kill(self):
+ """Kill the process with SIGKILL"""
+ if self._proc is not None:
+ self._proc.kill()
+
+
def spawn(
mycommand,
env=None,
opt_name=None,
fd_pipes=None,
returnpid=False,
+ returnproc=False,
uid=None,
gid=None,
groups=None,
@@ -316,7 +489,7 @@ def spawn(
unshare_mount=False,
unshare_pid=False,
warn_on_large_env=False,
-):
+) -> Union[int, MultiprocessingProcess, list[int]]:
"""
Spawns a given command.
@@ -334,6 +507,9 @@ def spawn(
@param returnpid: Return the Process IDs for a successful spawn.
NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
@type returnpid: Boolean
+ @param returnproc: Return a MultiprocessingProcess instance (conflicts with logfile parameter).
+ NOTE: This requires the caller to asynchronously wait for the MultiprocessingProcess instance.
+ @type returnproc: Boolean
@param uid: User ID to spawn as; useful for dropping privilages
@type uid: Integer
@param gid: Group ID to spawn as; useful for dropping privilages
@@ -369,11 +545,19 @@ def spawn(
"""
+ if logfile and returnproc:
+ raise ValueError(
+ "logfile parameter conflicts with returnproc (use fd_pipes instead)"
+ )
+
# mycommand is either a str or a list
if isinstance(mycommand, str):
mycommand = mycommand.split()
env = os.environ if env is None else env
+ # Sometimes os.environ can fail to pickle as shown in bug 923750
+ # comment 4, so copy it to a dict.
+ env = env if isinstance(env, dict) else dict(env)
env_stats = None
if warn_on_large_env:
@@ -422,10 +606,10 @@ def spawn(
# Create a tee process, giving it our stdout and stderr
# as well as the read end of the pipe.
- mypids.extend(
+ mypids.append(
spawn(
("tee", "-i", "-a", logfile),
- returnpid=True,
+ returnproc=True,
fd_pipes={0: pr, 1: fd_pipes[1], 2: fd_pipes[2]},
)
)
@@ -470,71 +654,39 @@ def spawn(
# fork, so that the result is cached in the main process.
bool(groups)
- parent_pid = portage.getpid()
- pid = None
- try:
- pid = os.fork()
-
- if pid == 0:
- portage._ForkWatcher.hook(portage._ForkWatcher)
- try:
- _exec(
- binary,
- mycommand,
- opt_name,
- fd_pipes,
- env,
- gid,
- groups,
- uid,
- umask,
- cwd,
- pre_exec,
- close_fds,
- unshare_net,
- unshare_ipc,
- unshare_mount,
- unshare_pid,
- unshare_flags,
- )
- except SystemExit:
- raise
- except Exception as e:
- if isinstance(e, OSError) and e.errno == errno.E2BIG:
- # If exec() failed with E2BIG, then this is
- # potentially because the environment variables
- # grew to large. The following will gather some
- # stats about the environment and print a
- # diagnostic message to help identifying the
- # culprit. See also
- # - https://bugs.gentoo.org/721088
- # - https://bugs.gentoo.org/830187
- if not env_stats:
- env_stats = calc_env_stats(env)
-
- writemsg(
- f"ERROR: Executing {mycommand} failed with E2BIG. Child process environment size: {env_stats.env_size} bytes. Largest environment variable: {env_stats.env_largest_name} ({env_stats.env_largest_size} bytes)\n"
- )
-
- # We need to catch _any_ exception so that it doesn't
- # propagate out of this function and cause exiting
- # with anything other than os._exit()
- writemsg(f"{e}:\n {' '.join(mycommand)}\n", noiselevel=-1)
- traceback.print_exc()
- sys.stderr.flush()
-
- finally:
- # Don't used portage.getpid() here, due to a race with the above
- # portage._ForkWatcher cache update.
- if pid == 0 or (pid is None and _os.getpid() != parent_pid):
- # Call os._exit() from a finally block in order
- # to suppress any finally blocks from earlier
- # in the call stack (see bug #345289). This
- # finally block has to be setup before the fork
- # in order to avoid a race condition.
- os._exit(1)
-
- if not isinstance(pid, int):
+ start_func = _start_proc if returnproc or not returnpid else _start_fork
+
+ pid = start_func(
+ _exec_wrapper,
+ args=(
+ binary,
+ mycommand,
+ opt_name,
+ fd_pipes,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ close_fds,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+ unshare_flags,
+ env_stats,
+ ),
+ fd_pipes=fd_pipes,
+ close_fds=close_fds,
+ )
+
+ if returnproc:
+ # _start_proc returns a MultiprocessingProcess instance.
+ return pid
+
+ if returnpid and not isinstance(pid, int):
raise AssertionError(f"fork returned non-integer: {repr(pid)}")
# Add the pid to our local and the global pid lists.
@@ -548,8 +700,15 @@ def spawn(
# If the caller wants to handle cleaning up the processes, we tell
# it about all processes that were created.
if returnpid:
+ warnings.warn(
+ "The portage.process.spawn returnpid parameter is deprecated and replaced by returnproc",
+ UserWarning,
+ stacklevel=1,
+ )
return mypids
+ loop = global_event_loop()
+
# Otherwise we clean them up.
while mypids:
# Pull the last reader in the pipe chain. If all processes
@@ -558,25 +717,22 @@ def spawn(
pid = mypids.pop(0)
# and wait for it.
- retval = os.waitpid(pid, 0)[1]
+ retval = loop.run_until_complete(pid.wait())
if retval:
# If it failed, kill off anything else that
# isn't dead yet.
for pid in mypids:
- # With waitpid and WNOHANG, only check the
- # first element of the tuple since the second
- # element may vary (bug #337465).
- if os.waitpid(pid, os.WNOHANG)[0] == 0:
- os.kill(pid, signal.SIGTERM)
- os.waitpid(pid, 0)
-
- # If it got a signal, return the signal that was sent.
- if retval & 0xFF:
- return (retval & 0xFF) << 8
+ waiter = asyncio.ensure_future(pid.wait(), loop)
+ try:
+ loop.run_until_complete(
+ asyncio.wait_for(asyncio.shield(waiter), 0.001)
+ )
+ except (TimeoutError, asyncio.TimeoutError):
+ pid.terminate()
+ loop.run_until_complete(waiter)
- # Otherwise, return its exit code.
- return retval >> 8
+ return retval
# Everything succeeded
return 0
@@ -654,6 +810,71 @@ def _configure_loopback_interface():
)
+def _exec_wrapper(
+ binary,
+ mycommand,
+ opt_name,
+ fd_pipes,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ close_fds,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+ unshare_flags,
+ env_stats,
+):
+ """
+ Calls _exec with the given args and handles any raised Exception.
+ The intention is for _exec_wrapper and _exec to be reusable with
+ other process cloning implementations besides _start_fork.
+ """
+ try:
+ _exec(
+ binary,
+ mycommand,
+ opt_name,
+ fd_pipes,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ close_fds,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+ unshare_flags,
+ )
+ except Exception as e:
+ if isinstance(e, OSError) and e.errno == errno.E2BIG:
+ # If exec() failed with E2BIG, then this is
+ # potentially because the environment variables
+ # grew too large. The following will gather some
+ # stats about the environment and print a
+ # diagnostic message to help identifying the
+ # culprit. See also
+ # - https://bugs.gentoo.org/721088
+ # - https://bugs.gentoo.org/830187
+ if not env_stats:
+ env_stats = calc_env_stats(env)
+
+ writemsg(
+ f"ERROR: Executing {mycommand} failed with E2BIG. Child process environment size: {env_stats.env_size} bytes. Largest environment variable: {env_stats.env_largest_name} ({env_stats.env_largest_size} bytes)\n"
+ )
+ writemsg(f"{e}:\n {' '.join(mycommand)}\n", noiselevel=-1)
+ raise
+
+
def _exec(
binary,
mycommand,
@@ -754,15 +975,19 @@ def _exec(
# the parent process (see bug #289486).
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
- _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
-
# Unshare (while still uid==0)
if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
filename = find_library("c")
if filename is not None:
libc = LoadLibrary(filename)
if libc is not None:
- try:
+ # unshare() may not be supported by libc
+ if not hasattr(libc, "unshare"):
+ unshare_net = False
+ unshare_ipc = False
+ unshare_mount = False
+ unshare_pid = False
+ else:
# Since a failed unshare call could corrupt process
# state, first validate that the call can succeed.
# The parent process should call _unshare_validate
@@ -793,120 +1018,154 @@ def _exec(
)
else:
if unshare_pid:
- main_child_pid = os.fork()
- if main_child_pid == 0:
- # The portage.getpid() cache may need to be updated here,
- # in case the pre_exec function invokes portage APIs.
- portage._ForkWatcher.hook(portage._ForkWatcher)
- # pid namespace requires us to become init
- binary, myargs = (
- portage._python_interpreter,
- [
- portage._python_interpreter,
- os.path.join(portage._bin_path, "pid-ns-init"),
- _unicode_encode(
- "" if uid is None else str(uid)
- ),
- _unicode_encode(
- "" if gid is None else str(gid)
- ),
- _unicode_encode(
- ""
- if groups is None
- else ",".join(
- str(group) for group in groups
- )
- ),
- _unicode_encode(
- "" if umask is None else str(umask)
- ),
- _unicode_encode(
- ",".join(str(fd) for fd in fd_pipes)
- ),
- binary,
- ]
- + myargs,
- )
- uid = None
- gid = None
- groups = None
- umask = None
- else:
- # Execute a supervisor process which will forward
- # signals to init and forward exit status to the
- # parent process. The supervisor process runs in
- # the global pid namespace, so skip /proc remount
- # and other setup that's intended only for the
- # init process.
- binary, myargs = portage._python_interpreter, [
+ # pid namespace requires us to become init
+ binary, myargs = (
+ portage._python_interpreter,
+ [
portage._python_interpreter,
os.path.join(portage._bin_path, "pid-ns-init"),
- str(main_child_pid),
+ _unicode_encode("" if uid is None else str(uid)),
+ _unicode_encode("" if gid is None else str(gid)),
+ _unicode_encode(
+ ""
+ if groups is None
+ else ",".join(str(group) for group in groups)
+ ),
+ _unicode_encode(
+ "" if umask is None else str(umask)
+ ),
+ _unicode_encode(
+ ",".join(str(fd) for fd in fd_pipes)
+ ),
+ binary,
]
-
- os.execve(binary, myargs, env)
-
- if unshare_mount:
- # mark the whole filesystem as slave to avoid
- # mounts escaping the namespace
- s = subprocess.Popen(["mount", "--make-rslave", "/"])
- mount_ret = s.wait()
- if mount_ret != 0:
- # TODO: should it be fatal maybe?
- writemsg(
- "Unable to mark mounts slave: %d\n" % (mount_ret,),
- noiselevel=-1,
- )
- if unshare_pid:
- # we need at least /proc being slave
- s = subprocess.Popen(["mount", "--make-slave", "/proc"])
- mount_ret = s.wait()
- if mount_ret != 0:
- # can't proceed with shared /proc
- writemsg(
- "Unable to mark /proc slave: %d\n" % (mount_ret,),
- noiselevel=-1,
- )
- os._exit(1)
- # mount new /proc for our namespace
- s = subprocess.Popen(
- ["mount", "-n", "-t", "proc", "proc", "/proc"]
+ + myargs,
)
- mount_ret = s.wait()
- if mount_ret != 0:
- writemsg(
- "Unable to mount new /proc: %d\n" % (mount_ret,),
- noiselevel=-1,
- )
- os._exit(1)
- if unshare_net:
- # use 'localhost' to avoid hostname resolution problems
- try:
- # pypy3 does not implement socket.sethostname()
- new_hostname = b"localhost"
- if hasattr(socket, "sethostname"):
- socket.sethostname(new_hostname)
- else:
- if (
- libc.sethostname(
- new_hostname, len(new_hostname)
- )
- != 0
- ):
- errno_value = ctypes.get_errno()
- raise OSError(
- errno_value, os.strerror(errno_value)
- )
- except Exception as e:
- writemsg(
- 'Unable to set hostname: %s (for FEATURES="network-sandbox")\n'
- % (e,),
- noiselevel=-1,
- )
- _configure_loopback_interface()
- except AttributeError:
- # unshare() not supported by libc
- pass
+ uid = None
+ gid = None
+ groups = None
+ umask = None
+
+ # Use _start_fork for os.fork() error handling, ensuring
+ # that if exec fails then the child process will display
+ # a traceback before it exits via os._exit to suppress any
+ # finally blocks from parent's call stack (bug 345289).
+ main_child_pid = _start_fork(
+ _exec2,
+ args=(
+ binary,
+ myargs,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+ ),
+ fd_pipes=None,
+ close_fds=False,
+ )
+
+ # Execute a supervisor process which will forward
+ # signals to init and forward exit status to the
+ # parent process. The supervisor process runs in
+ # the global pid namespace, so skip /proc remount
+ # and other setup that's intended only for the
+ # init process.
+ binary, myargs = portage._python_interpreter, [
+ portage._python_interpreter,
+ os.path.join(portage._bin_path, "pid-ns-init"),
+ str(main_child_pid),
+ ]
+
+ os.execve(binary, myargs, env)
+
+ # Reachable only if unshare_pid is False.
+ _exec2(
+ binary,
+ myargs,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+ )
+
+
+def _exec2(
+ binary,
+ myargs,
+ env,
+ gid,
+ groups,
+ uid,
+ umask,
+ cwd,
+ pre_exec,
+ unshare_net,
+ unshare_ipc,
+ unshare_mount,
+ unshare_pid,
+):
+ if unshare_mount:
+ # mark the whole filesystem as slave to avoid
+ # mounts escaping the namespace
+ s = subprocess.Popen(["mount", "--make-rslave", "/"])
+ mount_ret = s.wait()
+ if mount_ret != 0:
+ # TODO: should it be fatal maybe?
+ writemsg(
+ "Unable to mark mounts slave: %d\n" % (mount_ret,),
+ noiselevel=-1,
+ )
+ if unshare_pid:
+ # we need at least /proc being slave
+ s = subprocess.Popen(["mount", "--make-slave", "/proc"])
+ mount_ret = s.wait()
+ if mount_ret != 0:
+ # can't proceed with shared /proc
+ writemsg(
+ "Unable to mark /proc slave: %d\n" % (mount_ret,),
+ noiselevel=-1,
+ )
+ os._exit(1)
+ # mount new /proc for our namespace
+ s = subprocess.Popen(["mount", "-n", "-t", "proc", "proc", "/proc"])
+ mount_ret = s.wait()
+ if mount_ret != 0:
+ writemsg(
+ "Unable to mount new /proc: %d\n" % (mount_ret,),
+ noiselevel=-1,
+ )
+ os._exit(1)
+ if unshare_net:
+ # use 'localhost' to avoid hostname resolution problems
+ try:
+ # pypy3 does not implement socket.sethostname()
+ new_hostname = b"localhost"
+ if hasattr(socket, "sethostname"):
+ socket.sethostname(new_hostname)
+ else:
+ if libc.sethostname(new_hostname, len(new_hostname)) != 0:
+ errno_value = ctypes.get_errno()
+ raise OSError(errno_value, os.strerror(errno_value))
+ except Exception as e:
+ writemsg(
+ f'Unable to set hostname: {e} (for FEATURES="network-sandbox")\n',
+ noiselevel=-1,
+ )
+ _configure_loopback_interface()
# Set requested process permissions.
if gid:
@@ -1064,7 +1323,7 @@ def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
actually does nothing in this case), which avoids possible
interference.
"""
-
+ fd_pipes = {} if fd_pipes is None else fd_pipes
reverse_map = {}
# To protect from cases where direct assignment could
# clobber needed fds ({1:2, 2:1}) we create a reverse map
@@ -1138,6 +1397,119 @@ def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
pass
+def _start_fork(
+ target: Callable[..., None],
+ args: Optional[tuple[Any, ...]] = (),
+ kwargs: Optional[dict[str, Any]] = {},
+ fd_pipes: Optional[dict[int, int]] = None,
+ close_fds: Optional[bool] = True,
+) -> int:
+ """
+ Execute the target function in a fork. The fd_pipes and
+ close_fds parameters are handled in the fork, before the target
+ function is called. The args and kwargs parameters are passed
+ as positional and keyword arguments for the target function.
+
+ The target, args, and kwargs parameters are intended to
+ be equivalent to the corresponding multiprocessing.Process
+ constructor parameters.
+
+ Ultimately, the intention is for spawn to support other
+ process cloning implementations besides _start_fork, since
+ fork is unsafe for threaded processes as discussed in
+ https://github.com/python/cpython/issues/84559.
+ """
+ parent_pid = portage.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid == 0:
+ try:
+ _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
+ target(*args, **kwargs)
+ except Exception:
+ # We need to catch _any_ exception and display it since the child
+ # process must unconditionally exit via os._exit() if exec fails.
+ traceback.print_exc()
+ sys.stderr.flush()
+ finally:
+ # Don't use portage.getpid() here, in case there is a race
+ # with getpid cache invalidation via _ForkWatcher hook.
+ if pid == 0 or (pid is None and _os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+ return pid
+
+
+class _chain_pre_exec_fns:
+ """
+ Wraps a target function to call pre_exec functions just before
+ the original target function.
+ """
+
+ def __init__(self, target, *args):
+ self._target = target
+ self._pre_exec_fns = args
+
+ def __call__(self, *args, **kwargs):
+ for pre_exec in self._pre_exec_fns:
+ pre_exec()
+ return self._target(*args, **kwargs)
+
+
+def _setup_pipes_after_fork(fd_pipes):
+ for fd in set(fd_pipes.values()):
+ os.set_inheritable(fd, True)
+ _setup_pipes(fd_pipes, close_fds=False, inheritable=True)
+
+
+def _start_proc(
+ target: Callable[..., None],
+ args: Optional[tuple[Any, ...]] = (),
+ kwargs: Optional[dict[str, Any]] = {},
+ fd_pipes: Optional[dict[int, int]] = None,
+ close_fds: Optional[bool] = False,
+) -> MultiprocessingProcess:
+ """
+ Execute the target function using multiprocessing.Process.
+ If the close_fds parameter is True then NotImplementedError
+ is raised, since it is risky to forcefully close file
+ descriptors that have references (bug 374335), and PEP 446
+ should ensure that any relevant file descriptors are
+ non-inheritable and therefore automatically closed on exec.
+ """
+ if close_fds:
+ raise NotImplementedError(
+ "close_fds is not supported (since file descriptors are non-inheritable by default for exec)"
+ )
+
+ # Manage fd_pipes inheritance for spawn/exec (bug 923755),
+ # which ForkProcess does not handle because its target
+ # function does not necessarily exec.
+ if fd_pipes and multiprocessing.get_start_method() == "fork":
+ target = _chain_pre_exec_fns(target, partial(_setup_pipes_after_fork, fd_pipes))
+ fd_pipes = None
+
+ proc = ForkProcess(
+ scheduler=global_event_loop(),
+ target=target,
+ args=args,
+ kwargs=kwargs,
+ fd_pipes=fd_pipes,
+ create_pipe=False, # Pipe creation is delegated to the caller (see bug 923750).
+ )
+ proc.start()
+
+ # ForkProcess conveniently holds a MultiprocessingProcess
+ # instance that is suitable to return here.
+ return proc._proc
+
+
def find_binary(binary):
"""
Given a binary name, find the binary in PATH
diff --git a/lib/portage/proxy/objectproxy.py b/lib/portage/proxy/objectproxy.py
index 7cdc6f68d..f36464e19 100644
--- a/lib/portage/proxy/objectproxy.py
+++ b/lib/portage/proxy/objectproxy.py
@@ -6,7 +6,6 @@ __all__ = ["ObjectProxy"]
class ObjectProxy:
-
"""
Object that acts as a proxy to another object, forwarding
attribute accesses and method calls. This can be useful
diff --git a/lib/portage/sync/modules/git/git.py b/lib/portage/sync/modules/git/git.py
index 44d739ce6..8fdbf97de 100644
--- a/lib/portage/sync/modules/git/git.py
+++ b/lib/portage/sync/modules/git/git.py
@@ -500,6 +500,7 @@ class GitSync(NewBase):
opts = self.options.get("emerge_config").opts
debug = "--debug" in opts
quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ verbose = "--verbose" in opts
openpgp_env = self._get_openpgp_env(self.repo.sync_openpgp_key_path, debug)
@@ -534,35 +535,48 @@ class GitSync(NewBase):
"log.showsignature=0",
"log",
"-n1",
- "--pretty=format:%G?",
+ "--pretty=format:%G?%n%GF",
revision,
]
try:
- status = portage._unicode_decode(
+ lines = portage._unicode_decode(
subprocess.check_output(
rev_cmd,
cwd=portage._unicode_encode(self.repo.location),
env=env,
)
- ).strip()
+ ).splitlines()
except subprocess.CalledProcessError:
return False
+ status = lines[0].strip()
+ if len(lines) > 1:
+ signing_key = lines[1].strip()
+
if status == "G": # good signature is good
if not quiet:
- out.einfo("Trusted signature found on top commit")
+ message = "Trusted signature found on top commit"
+ if verbose:
+ message += (
+ f" (git revision: {revision}, signing key: {signing_key})"
+ )
+ out.einfo(message)
return True
if status == "U": # untrusted
- out.ewarn("Top commit signature is valid but not trusted")
+ out.ewarn(
+ f"Top commit signature is valid but not trusted (git revision: {revision}, signing key: {signing_key})"
+ )
return True
if status == "B":
- expl = "bad signature"
+ expl = (
+ f"bad signature using key {signing_key} on git revision {revision}"
+ )
elif status == "X":
- expl = "expired signature"
+ expl = f"expired signature using key {signing_key} on git revision {revision}"
elif status == "Y":
- expl = "expired key"
+ expl = f"expired key using key {signing_key} on git revision {revision}"
elif status == "R":
- expl = "revoked key"
+ expl = f"revoked key using key {signing_key} on git revision {revision}"
elif status == "E":
expl = "unable to verify signature (missing key?)"
elif status == "N":
diff --git a/lib/portage/sync/modules/rsync/rsync.py b/lib/portage/sync/modules/rsync/rsync.py
index 175c7f2e8..5d442d262 100644
--- a/lib/portage/sync/modules/rsync/rsync.py
+++ b/lib/portage/sync/modules/rsync/rsync.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2023 Gentoo Authors
+# Copyright 1999-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import datetime
@@ -708,48 +708,47 @@ class RsyncSync(NewBase):
command.append(syncuri.rstrip("/") + "/metadata/timestamp.chk")
command.append(tmpservertimestampfile)
content = None
- pids = []
+ proc = None
+ proc_waiter = None
+ loop = asyncio.get_event_loop()
try:
# Timeout here in case the server is unresponsive. The
# --timeout rsync option doesn't apply to the initial
# connection attempt.
try:
- if self.rsync_initial_timeout:
- portage.exception.AlarmSignal.register(self.rsync_initial_timeout)
-
- pids.extend(
- portage.process.spawn(command, returnpid=True, **self.spawn_kwargs)
+ proc = portage.process.spawn(
+ command, returnproc=True, **self.spawn_kwargs
+ )
+ proc_waiter = asyncio.ensure_future(proc.wait(), loop)
+ future = (
+ asyncio.wait_for(
+ asyncio.shield(proc_waiter), self.rsync_initial_timeout
+ )
+ if self.rsync_initial_timeout
+ else proc_waiter
)
- exitcode = os.waitpid(pids[0], 0)[1]
+ exitcode = loop.run_until_complete(future)
if self.usersync_uid is not None:
portage.util.apply_permissions(
tmpservertimestampfile, uid=os.getuid()
)
content = portage.grabfile(tmpservertimestampfile)
finally:
- if self.rsync_initial_timeout:
- portage.exception.AlarmSignal.unregister()
try:
os.unlink(tmpservertimestampfile)
except OSError:
pass
- except portage.exception.AlarmSignal:
+ except (TimeoutError, asyncio.TimeoutError):
# timed out
print("timed out")
# With waitpid and WNOHANG, only check the
# first element of the tuple since the second
# element may vary (bug #337465).
- if pids and os.waitpid(pids[0], os.WNOHANG)[0] == 0:
- os.kill(pids[0], signal.SIGTERM)
- os.waitpid(pids[0], 0)
+ if proc_waiter and not proc_waiter.done():
+ proc.terminate()
+ loop.run_until_complete(proc_waiter)
# This is the same code rsync uses for timeout.
exitcode = 30
- else:
- if exitcode != os.EX_OK:
- if exitcode & 0xFF:
- exitcode = (exitcode & 0xFF) << 8
- else:
- exitcode = exitcode >> 8
if content:
try:
@@ -758,7 +757,6 @@ class RsyncSync(NewBase):
)
except (OverflowError, ValueError):
pass
- del command, pids, content
if exitcode == os.EX_OK:
if (servertimestamp != 0) and (servertimestamp == timestamp):
diff --git a/lib/portage/tests/__init__.py b/lib/portage/tests/__init__.py
index ef5985298..23dd366d8 100644
--- a/lib/portage/tests/__init__.py
+++ b/lib/portage/tests/__init__.py
@@ -1,8 +1,9 @@
# tests/__init__.py -- Portage Unit Test functionality
-# Copyright 2006-2023 Gentoo Authors
+# Copyright 2006-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import argparse
+import multiprocessing
import sys
import time
import unittest
@@ -79,6 +80,15 @@ class TestCase(unittest.TestCase):
self.bindir = cnf_bindir
self.sbindir = cnf_sbindir
+ def setUp(self):
+ """
+ Setup multiprocessing start method if needed. It needs to be
+ done relatively late in order to work with the pytest-xdist
+ plugin due to execnet usage.
+ """
+ if os.environ.get("PORTAGE_MULTIPROCESSING_START_METHOD") == "spawn":
+ multiprocessing.set_start_method("spawn", force=True)
+
def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
"""Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
diff --git a/lib/portage/tests/bin/setup_env.py b/lib/portage/tests/bin/setup_env.py
index faef118b0..5787f8768 100644
--- a/lib/portage/tests/bin/setup_env.py
+++ b/lib/portage/tests/bin/setup_env.py
@@ -1,5 +1,5 @@
# setup_env.py -- Make sure bin subdir has sane env for testing
-# Copyright 2007-2013 Gentoo Foundation
+# Copyright 2007-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -78,10 +78,7 @@ def portage_func(func, args, exit_status=0):
f = open("/dev/null", "wb")
fd_pipes = {0: 0, 1: f.fileno(), 2: f.fileno()}
- def pre_exec():
- os.chdir(env["S"])
-
- spawn([func] + args.split(), env=env, fd_pipes=fd_pipes, pre_exec=pre_exec)
+ spawn([func] + args.split(), env=env, fd_pipes=fd_pipes, cwd=env["S"])
f.close()
diff --git a/lib/portage/tests/dbapi/test_auxdb.py b/lib/portage/tests/dbapi/test_auxdb.py
index c11eed73e..aac6ce361 100644
--- a/lib/portage/tests/dbapi/test_auxdb.py
+++ b/lib/portage/tests/dbapi/test_auxdb.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2023 Gentoo Authors
+# Copyright 2020-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -16,9 +16,7 @@ class AuxdbTestCase(TestCase):
from portage.cache.anydbm import database
except ImportError:
self.skipTest("dbm import failed")
- self._test_mod(
- "portage.cache.anydbm.database", multiproc=False, picklable=False
- )
+ self._test_mod("portage.cache.anydbm.database", multiproc=False, picklable=True)
def test_flat_hash_md5(self):
self._test_mod("portage.cache.flat_hash.md5_database")
@@ -64,41 +62,50 @@ class AuxdbTestCase(TestCase):
user_config={"modules": (f"portdbapi.auxdbmodule = {auxdbmodule}",)},
)
- portdb = playground.trees[playground.eroot]["porttree"].dbapi
- metadata_keys = ["DEFINED_PHASES", "DEPEND", "EAPI", "INHERITED"]
-
- test_func = functools.partial(
- self._run_test_mod_async, ebuilds, metadata_keys, portdb
- )
-
- results = test_func()
-
- self._compare_results(
- ebuilds, eclass_defined_phases, eclass_depend, ebuild_inherited, results
- )
+ try:
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ metadata_keys = ["DEFINED_PHASES", "DEPEND", "EAPI", "INHERITED"]
- loop = asyncio._wrap_loop()
- picklable_or_fork = picklable or multiprocessing.get_start_method == "fork"
- if picklable_or_fork:
- results = loop.run_until_complete(
- loop.run_in_executor(ForkExecutor(), test_func)
+ test_func = functools.partial(
+ self._run_test_mod_async, ebuilds, metadata_keys, portdb
)
+ results = test_func()
+
self._compare_results(
ebuilds, eclass_defined_phases, eclass_depend, ebuild_inherited, results
)
- auxdb = portdb.auxdb[portdb.getRepositoryPath("test_repo")]
- cpv = next(iter(ebuilds))
-
- modify_auxdb = functools.partial(self._modify_auxdb, auxdb, cpv)
-
- if multiproc and picklable_or_fork:
- loop.run_until_complete(loop.run_in_executor(ForkExecutor(), modify_auxdb))
- else:
- modify_auxdb()
-
- self.assertEqual(auxdb[cpv]["RESTRICT"], "test")
+ loop = asyncio._wrap_loop()
+ picklable_or_fork = picklable or multiprocessing.get_start_method == "fork"
+ if picklable_or_fork:
+ results = loop.run_until_complete(
+ loop.run_in_executor(ForkExecutor(), test_func)
+ )
+
+ self._compare_results(
+ ebuilds,
+ eclass_defined_phases,
+ eclass_depend,
+ ebuild_inherited,
+ results,
+ )
+
+ auxdb = portdb.auxdb[portdb.getRepositoryPath("test_repo")]
+ cpv = next(iter(ebuilds))
+
+ modify_auxdb = functools.partial(self._modify_auxdb, auxdb, cpv)
+
+ if multiproc and picklable_or_fork:
+ loop.run_until_complete(
+ loop.run_in_executor(ForkExecutor(), modify_auxdb)
+ )
+ else:
+ modify_auxdb()
+
+ self.assertEqual(auxdb[cpv]["RESTRICT"], "test")
+ finally:
+ playground.cleanup()
def _compare_results(
self, ebuilds, eclass_defined_phases, eclass_depend, ebuild_inherited, results
diff --git a/lib/portage/tests/dbapi/test_portdb_cache.py b/lib/portage/tests/dbapi/test_portdb_cache.py
index 2f14b7bdf..c24a4f209 100644
--- a/lib/portage/tests/dbapi/test_portdb_cache.py
+++ b/lib/portage/tests/dbapi/test_portdb_cache.py
@@ -1,6 +1,7 @@
-# Copyright 2012-2023 Gentoo Authors
+# Copyright 2012-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import shutil
import subprocess
import sys
import textwrap
@@ -63,6 +64,7 @@ class PortdbCacheTestCase(TestCase):
python_cmd = (portage_python, "-b", "-Wd", "-c")
test_commands = (
+ (lambda: shutil.rmtree(md5_cache_dir) or True,),
(lambda: not os.path.exists(pms_cache_dir),),
(lambda: not os.path.exists(md5_cache_dir),),
python_cmd
@@ -223,7 +225,7 @@ class PortdbCacheTestCase(TestCase):
pythonpath = PORTAGE_PYM_PATH + pythonpath
env = {
- "PATH": os.environ.get("PATH", ""),
+ "PATH": settings["PATH"],
"PORTAGE_OVERRIDE_EPREFIX": eprefix,
"PORTAGE_PYTHON": portage_python,
"PORTAGE_REPOSITORIES": settings.repositories.config_string(),
diff --git a/lib/portage/tests/dep/meson.build b/lib/portage/tests/dep/meson.build
index 2097b02f9..7350f7775 100644
--- a/lib/portage/tests/dep/meson.build
+++ b/lib/portage/tests/dep/meson.build
@@ -15,6 +15,7 @@ py.install_sources(
'test_get_required_use_flags.py',
'test_isjustname.py',
'test_isvalidatom.py',
+ 'test_libc.py',
'test_match_from_list.py',
'test_overlap_dnf.py',
'test_paren_reduce.py',
diff --git a/lib/portage/tests/dep/test_libc.py b/lib/portage/tests/dep/test_libc.py
new file mode 100644
index 000000000..6ea96d720
--- /dev/null
+++ b/lib/portage/tests/dep/test_libc.py
@@ -0,0 +1,81 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.dep.libc import strip_libc_deps
+from portage.tests import TestCase
+
+
+class LibcUtilStripDeps(TestCase):
+ def testStripSimpleDeps(self):
+ """
+ Test that we strip a basic libc dependency out and return
+ a list of dependencies without it in there.
+ """
+
+ libc_dep = [Atom("=sys-libs/glibc-2.38")]
+
+ original_deps = (
+ [
+ Atom("=sys-libs/glibc-2.38"),
+ Atom("=app-misc/foo-1.2.3"),
+ ],
+ [
+ Atom("=sys-libs/glibc-2.38"),
+ ],
+ [
+ Atom("=app-misc/foo-1.2.3"),
+ Atom("=app-misc/bar-1.2.3"),
+ ],
+ )
+
+ for deplist in original_deps:
+ strip_libc_deps(deplist, libc_dep)
+
+ self.assertFalse(
+ all(libc in deplist for libc in libc_dep),
+ "Stripped deplist contains a libc candidate",
+ )
+
+ def testStripComplexRealizedDeps(self):
+ """
+ Test that we strip pathological libc dependencies out and return
+ a list of dependencies without it in there.
+ """
+
+ # This shouldn't really happen for a 'realized' dependency, but
+ # we shouldn't crash if it happens anyway.
+ libc_dep = [Atom("=sys-libs/glibc-2.38*[p]")]
+
+ original_deps = (
+ [
+ Atom("=sys-libs/glibc-2.38[x]"),
+ Atom("=app-misc/foo-1.2.3"),
+ ],
+ [
+ Atom("=sys-libs/glibc-2.38[p]"),
+ ],
+ [
+ Atom("=app-misc/foo-1.2.3"),
+ Atom("=app-misc/bar-1.2.3"),
+ ],
+ )
+
+ for deplist in original_deps:
+ strip_libc_deps(deplist, libc_dep)
+
+ self.assertFalse(
+ all(libc in deplist for libc in libc_dep),
+ "Stripped deplist contains a libc candidate",
+ )
+
+ def testStripNonRealizedDeps(self):
+ """
+ Check that we strip non-realized libc deps.
+ """
+
+ libc_dep = [Atom("sys-libs/glibc:2.2=")]
+ original_deps = [Atom(">=sys-libs/glibc-2.38-r7")]
+
+ strip_libc_deps(original_deps, libc_dep)
+ self.assertFalse(original_deps, "(g)libc dep was not stripped")
diff --git a/lib/portage/tests/dep/test_overlap_dnf.py b/lib/portage/tests/dep/test_overlap_dnf.py
index dfeded3b4..7fd1cfe7d 100644
--- a/lib/portage/tests/dep/test_overlap_dnf.py
+++ b/lib/portage/tests/dep/test_overlap_dnf.py
@@ -1,4 +1,4 @@
-# Copyright 2017 Gentoo Foundation
+# Copyright 2017-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -46,3 +46,50 @@ class OverlapDNFTestCase(TestCase):
_overlap_dnf(use_reduce(dep_str, token_class=Atom, opconvert=True)),
result,
)
+
+
+class DuplicateOverlapDNFTestCase(TestCase):
+ def testDuplicateOverlapDNF(self):
+ """
+ Demonstrate deduplication of any-of blocks, preventing unnecessary
+ DNF expansion for duplicate any-of blocks as in bug 891137.
+ """
+ test_cases = (
+ ("|| ( cat/A cat/B ) || ( cat/A cat/B )", [["||", "cat/A", "cat/B"]]),
+ (
+ "|| ( cat/A cat/B ) cat/E || ( cat/C cat/D ) || ( cat/A cat/B )",
+ ["cat/E", ["||", "cat/A", "cat/B"], ["||", "cat/C", "cat/D"]],
+ ),
+ (
+ "|| ( cat/A cat/B ) cat/D || ( cat/B cat/C ) || ( cat/A cat/B )",
+ [
+ "cat/D",
+ [
+ "||",
+ ["cat/A", "cat/B"],
+ ["cat/A", "cat/C"],
+ ["cat/B", "cat/B"],
+ ["cat/B", "cat/C"],
+ ],
+ ],
+ ),
+ (
+ "|| ( cat/A cat/B ) || ( cat/C cat/D ) || ( ( cat/B cat/E ) cat/F ) || ( cat/A cat/B )",
+ [
+ [
+ "||",
+ ["cat/A", "cat/B", "cat/E"],
+ ["cat/A", "cat/F"],
+ ["cat/B", "cat/B", "cat/E"],
+ ["cat/B", "cat/F"],
+ ],
+ ["||", "cat/C", "cat/D"],
+ ],
+ ),
+ )
+
+ for dep_str, result in test_cases:
+ self.assertEqual(
+ _overlap_dnf(use_reduce(dep_str, token_class=Atom, opconvert=True)),
+ result,
+ )
diff --git a/lib/portage/tests/ebuild/test_fetch.py b/lib/portage/tests/ebuild/test_fetch.py
index a9ca030ff..4812eb430 100644
--- a/lib/portage/tests/ebuild/test_fetch.py
+++ b/lib/portage/tests/ebuild/test_fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2019-2021 Gentoo Authors
+# Copyright 2019-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -246,11 +246,11 @@ class EbuildFetchTestCase(TestCase):
"""
% orig_fetchcommand.replace("${FILE}", "${FILE}.__download__")
)
- settings[
- "FETCHCOMMAND"
- ] = '"{}" "{}" "${{URI}}" "${{DISTDIR}}" "${{FILE}}"'.format(
- BASH_BINARY,
- temp_fetchcommand,
+ settings["FETCHCOMMAND"] = (
+ '"{}" "{}" "${{URI}}" "${{DISTDIR}}" "${{FILE}}"'.format(
+ BASH_BINARY,
+ temp_fetchcommand,
+ )
)
settings.features.add("skiprocheck")
settings.features.remove("distlocks")
@@ -577,12 +577,12 @@ class EbuildFetchTestCase(TestCase):
content_db_state = dict(emdisconf.content_db.items())
self.assertEqual(content_db_state, dict(emdisconf.content_db.items()))
self.assertEqual(
- [
+ {
k[len("filename:") :]
for k in content_db_state
if k.startswith("filename:")
- ],
- ["bar", "foo"],
+ },
+ {"bar", "foo"},
)
self.assertEqual(
content_db_state["filename:foo"], content_db_state["filename:bar"]
@@ -615,12 +615,12 @@ class EbuildFetchTestCase(TestCase):
emdisconf.content_db.remove(filename)
# foo should still have a content revision corresponding to bar's content.
self.assertEqual(
- [
+ {
k[len("filename:") :]
for k in emdisconf.content_db
if k.startswith("filename:")
- ],
- ["bar", "foo"],
+ },
+ {"bar", "foo"},
)
self.assertEqual(len(emdisconf.content_db["filename:foo"]), 1)
self.assertEqual(
diff --git a/lib/portage/tests/emerge/conftest.py b/lib/portage/tests/emerge/conftest.py
index c534f5e9d..d9aec7041 100644
--- a/lib/portage/tests/emerge/conftest.py
+++ b/lib/portage/tests/emerge/conftest.py
@@ -814,9 +814,8 @@ def _generate_all_baseline_commands(playground, binhost):
)
# Remove binrepos.conf and test PORTAGE_BINHOST.
- def _replace_pkgdir_and_rm_binrepos_conf_file():
+ def _rm_pkgdir_and_rm_binrepos_conf_file():
shutil.rmtree(pkgdir)
- os.rename(binhost_dir, pkgdir)
os.unlink(binrepos_conf_file)
getbinpkgonly_fetchonly = Emerge(
@@ -824,11 +823,25 @@ def _generate_all_baseline_commands(playground, binhost):
"--getbinpkgonly",
"dev-libs/A",
env_mod={"PORTAGE_BINHOST": binhost_uri},
- preparation=_replace_pkgdir_and_rm_binrepos_conf_file,
+ preparation=_rm_pkgdir_and_rm_binrepos_conf_file,
+ )
+
+ # Test bug 920537 binrepos.conf with local file src-uri.
+ def _rm_pkgdir_and_create_binrepos_conf_with_file_uri():
+ shutil.rmtree(pkgdir)
+ with open(binrepos_conf_file, "w") as f:
+ f.write("[test-binhost]\n")
+ f.write(f"sync-uri = file://{binhost_dir}\n")
+
+ getbinpkgonly_file_uri = Emerge(
+ "-fe",
+ "--getbinpkgonly",
+ "dev-libs/A",
+ preparation=_rm_pkgdir_and_create_binrepos_conf_with_file_uri,
)
fetch_sequence = PortageCommandSequence(
- make_package, getbinpkgonly, getbinpkgonly_fetchonly
+ make_package, getbinpkgonly, getbinpkgonly_fetchonly, getbinpkgonly_file_uri
)
test_commands["binhost emerge"] = fetch_sequence
yield test_commands
diff --git a/lib/portage/tests/emerge/meson.build b/lib/portage/tests/emerge/meson.build
index b42945123..0e0a41974 100644
--- a/lib/portage/tests/emerge/meson.build
+++ b/lib/portage/tests/emerge/meson.build
@@ -1,11 +1,13 @@
py.install_sources(
[
'test_actions.py',
+ 'test_binpkg_fetch.py',
'test_config_protect.py',
'test_emerge_blocker_file_collision.py',
'test_emerge_slot_abi.py',
'test_global_updates.py',
'test_baseline.py',
+ 'test_libc_dep_inject.py',
'__init__.py',
'__test__.py',
],
diff --git a/lib/portage/tests/emerge/test_actions.py b/lib/portage/tests/emerge/test_actions.py
index 17e8b7a2b..cdc087a8e 100644
--- a/lib/portage/tests/emerge/test_actions.py
+++ b/lib/portage/tests/emerge/test_actions.py
@@ -3,7 +3,11 @@
from unittest.mock import MagicMock, patch
-from _emerge.actions import run_action
+from _emerge.actions import get_libc_version, run_action
+
+from portage.const import LIBC_PACKAGE_ATOM
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom
from portage.tests import TestCase
@@ -45,3 +49,20 @@ class RunActionTestCase(TestCase):
bt.populate.assert_called_once_with(
getbinpkgs=False, getbinpkg_refresh=True, pretend=False
)
+
+ def testGetSystemLibc(self):
+ """
+ Check that get_libc_version extracts the right version string
+ from the provider LIBC_PACKAGE_ATOM for emerge --info and friends.
+ """
+ settings = MagicMock()
+
+ settings.getvirtuals.return_value = {
+ LIBC_PACKAGE_ATOM: [Atom("=sys-libs/musl-1.2.3")]
+ }
+ settings.__getitem__.return_value = {}
+
+ vardb = fakedbapi(settings)
+ vardb.cpv_inject("sys-libs/musl-1.2.3", {"SLOT": "0"})
+
+ self.assertEqual(get_libc_version(vardb), ["musl-1.2.3"])
diff --git a/lib/portage/tests/emerge/test_baseline.py b/lib/portage/tests/emerge/test_baseline.py
index 8f4452894..eb4a3372d 100644
--- a/lib/portage/tests/emerge/test_baseline.py
+++ b/lib/portage/tests/emerge/test_baseline.py
@@ -97,7 +97,7 @@ async def _async_test_baseline(playground, binhost, commands):
profile_path = settings.profile_path
user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)
- path = os.environ.get("PATH")
+ path = settings.get("PATH")
if path is not None and not path.strip():
path = None
if path is None:
diff --git a/lib/portage/tests/emerge/test_binpkg_fetch.py b/lib/portage/tests/emerge/test_binpkg_fetch.py
new file mode 100644
index 000000000..731711bad
--- /dev/null
+++ b/lib/portage/tests/emerge/test_binpkg_fetch.py
@@ -0,0 +1,226 @@
+# Copyright 2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import portage
+from portage import _unicode_decode, os
+from portage.const import (
+ PORTAGE_PYM_PATH,
+ USER_CONFIG_PATH,
+)
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+
+class BinpkgFetchtestCase(TestCase):
+ def testLocalFilePkgSyncUpdate(self):
+ """
+ Check handling of local file:// sync-uri and unnecessary BUILD_ID
+ increments (bug #921208).
+ """
+ debug = False
+
+ ebuilds = {
+ "dev-libs/A-1::local": {
+ "EAPI": "7",
+ "SLOT": "0",
+ },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ trees = playground.trees
+ bindb = trees[eroot]["bintree"].dbapi
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+
+ portage_python = portage._python_interpreter
+ emerge_cmd = (
+ portage_python,
+ "-b",
+ "-Wd",
+ os.path.join(str(self.bindir), "emerge"),
+ )
+
+ tmppkgdir = tempfile.TemporaryDirectory()
+ tmppkgdir_suffix = os.path.join(tmppkgdir.name, "binpkg")
+
+ test_commands = (
+ # Create a trivial binpkg first.
+ emerge_cmd
+ + (
+ "--oneshot",
+ "--verbose",
+ "--buildpkg",
+ "dev-libs/A",
+ ),
+ # Copy to a new PKGDIR which we'll use as PORTAGE_BINHOST then delete the old PKGDIR.
+ (
+ (
+ lambda: shutil.copytree(bindb.bintree.pkgdir, tmppkgdir_suffix)
+ or True,
+ )
+ ),
+ (
+ (
+ lambda: os.unlink(
+ os.path.join(
+ bindb.bintree.pkgdir, "dev-libs", "A", "A-1-1.gpkg.tar"
+ )
+ )
+ or True,
+ )
+ ),
+ )
+ test_commands_nonfatal = (
+ # This should succeed if we've correctly saved it as A-1-1.gpkg.tar, not
+ # A-1-2.gpkg.tar, and then also try to unpack the right filename, but
+ # we defer checking the exit code to get a better error if the binpkg
+ # was downloaded with the wrong filename.
+ emerge_cmd
+ + (
+ "--oneshot",
+ "--verbose",
+ "--getbinpkgonly",
+ "dev-libs/A",
+ ),
+ )
+ test_commands_final = (
+ # Check whether the downloaded binpkg in PKGDIR has the correct
+ # filename (-1) or an unnecessarily-incremented one (-2).
+ (
+ lambda: os.path.exists(
+ os.path.join(
+ bindb.bintree.pkgdir, "dev-libs", "A", "A-1-1.gpkg.tar"
+ )
+ ),
+ ),
+ )
+
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+
+ path = settings.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX": eprefix,
+ "PATH": path,
+ "PORTAGE_PYTHON": portage_python,
+ "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH": pythonpath,
+ "PORTAGE_INST_GID": str(os.getgid()),
+ "PORTAGE_INST_UID": str(os.getuid()),
+ "FEATURES": "-pkgdir-index-trusted",
+ }
+
+ dirs = [
+ playground.distdir,
+ fake_bin,
+ portage_tmpdir,
+ user_config_dir,
+ var_cache_edb,
+ ]
+
+ true_symlinks = ["chown", "chgrp"]
+
+ needed_binaries = {
+ "true": (find_binary("true"), True),
+ }
+
+ def run_commands(test_commands, require_success=True):
+ all_successful = True
+
+ for i, args in enumerate(test_commands):
+ if hasattr(args[0], "__call__"):
+ if require_success:
+ self.assertTrue(args[0](), f"callable at index {i} failed")
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ local_env["PORTAGE_BINHOST"] = f"file:///{tmppkgdir_suffix}"
+ proc = subprocess.Popen(args, env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ if all_successful and proc.returncode != os.EX_OK:
+ all_successful = False
+
+ if require_success:
+ self.assertEqual(
+ os.EX_OK, proc.returncode, f"emerge failed with args {args}"
+ )
+
+ return all_successful
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(needed_binaries["true"][0], os.path.join(fake_bin, x))
+
+ with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
+ f.write(b"100")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ run_commands(test_commands)
+ deferred_success = run_commands(test_commands_nonfatal, False)
+ run_commands(test_commands_final)
+
+ # Check the return value of test_commands_nonfatal later on so
+ # we can get a better error message from test_commands_final
+ # if possible.
+ self.assertTrue(deferred_success, f"{test_commands_nonfatal} failed")
+ finally:
+ playground.debug = False
+ playground.cleanup()
+ tmppkgdir.cleanup()
diff --git a/lib/portage/tests/emerge/test_config_protect.py b/lib/portage/tests/emerge/test_config_protect.py
index 560a49a76..e04fc1a92 100644
--- a/lib/portage/tests/emerge/test_config_protect.py
+++ b/lib/portage/tests/emerge/test_config_protect.py
@@ -191,7 +191,7 @@ src_install() {
fake_bin = os.path.join(eprefix, "bin")
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
- path = os.environ.get("PATH")
+ path = settings.get("PATH")
if path is not None and not path.strip():
path = None
if path is None:
diff --git a/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py b/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py
index b29a83fce..1eb7da79f 100644
--- a/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py
+++ b/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py
@@ -98,7 +98,7 @@ src_install() {
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
profile_path = settings.profile_path
- path = os.environ.get("PATH")
+ path = settings.get("PATH")
if path is not None and not path.strip():
path = None
if path is None:
diff --git a/lib/portage/tests/emerge/test_emerge_slot_abi.py b/lib/portage/tests/emerge/test_emerge_slot_abi.py
index 70a18b35c..c1a8fe894 100644
--- a/lib/portage/tests/emerge/test_emerge_slot_abi.py
+++ b/lib/portage/tests/emerge/test_emerge_slot_abi.py
@@ -111,7 +111,7 @@ class SlotAbiEmergeTestCase(TestCase):
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
profile_path = settings.profile_path
- path = os.environ.get("PATH")
+ path = settings.get("PATH")
if path is not None and not path.strip():
path = None
if path is None:
diff --git a/lib/portage/tests/emerge/test_libc_dep_inject.py b/lib/portage/tests/emerge/test_libc_dep_inject.py
new file mode 100644
index 000000000..933affcd7
--- /dev/null
+++ b/lib/portage/tests/emerge/test_libc_dep_inject.py
@@ -0,0 +1,552 @@
+# Copyright 2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import PORTAGE_PYM_PATH, USER_CONFIG_PATH
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.util import ensure_dirs
+
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+
+class LibcDepInjectEmergeTestCase(TestCase):
+ def testLibcDepInjection(self):
+ """
+ Test whether the implicit libc dependency injection (bug #913628)
+ is correctly added for only ebuilds installing an ELF binary.
+
+ Based on BlockerFileCollisionEmergeTestCase.
+ """
+ debug = False
+
+ install_elf = textwrap.dedent(
+ """
+ S="${WORKDIR}"
+
+ src_install() {
+ insinto /usr/bin
+ # We need an ELF binary for the injection to trigger, so
+ # use ${BASH} given we know it must be around for running ebuilds.
+ cp "${BASH}" "${ED}"/usr/bin/${PN} || die
+ }
+ """
+ )
+
+ ebuilds = {
+ "sys-libs/glibc-2.38": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "virtual/libc-1": {
+ "EAPI": "8",
+ "RDEPEND": "sys-libs/glibc",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ },
+ "dev-libs/C-1": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "dev-libs/D-1": {
+ "EAPI": "8",
+ },
+ "dev-libs/E-1": {
+ "EAPI": "8",
+ "RDEPEND": ">=dev-libs/D-1",
+ "MISC_CONTENT": install_elf,
+ },
+ }
+
+ world = ("dev-libs/A",)
+
+ playground = ResolverPlayground(ebuilds=ebuilds, world=world, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+
+ portage_python = portage._python_interpreter
+ emerge_cmd = (
+ portage_python,
+ "-b",
+ "-Wd",
+ os.path.join(str(self.bindir), "emerge"),
+ )
+
+ test_commands = (
+ # If we install a package with an ELF but no libc provider is installed,
+ # make sure we don't inject anything (we don't want to have some bare RDEPEND with
+ # literally "[]").
+ emerge_cmd
+ + (
+ "--oneshot",
+ "dev-libs/C",
+ ),
+ (
+ lambda: not portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "dev-libs", "C-1", "RDEPEND"
+ )
+ ),
+ ),
+ # (We need sys-libs/glibc pulled in and virtual/libc installed)
+ emerge_cmd
+ + (
+ "--oneshot",
+ "virtual/libc",
+ ),
+ # A package NOT installing an ELF binary shouldn't have an injected libc dep
+ # Let's check the virtual/libc one as we already have to merge it to pull in
+ # sys-libs/glibc, but we'll do a better check after too.
+ (
+ lambda: ">=sys-libs/glibc-2.38\n"
+ not in portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "virtual", "libc-1", "RDEPEND"
+ )
+ ),
+ ),
+ # A package NOT installing an ELF binary shouldn't have an injected libc dep
+ emerge_cmd
+ + (
+ "--oneshot",
+ "dev-libs/B",
+ ),
+ (
+ lambda: not portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "dev-libs", "B-1", "RDEPEND"
+ )
+ ),
+ ),
+ # A package installing an ELF binary should have an injected libc dep
+ emerge_cmd
+ + (
+ "--oneshot",
+ "dev-libs/A",
+ ),
+ (lambda: os.path.exists(os.path.join(eroot, "usr/bin/A")),),
+ (
+ lambda: ">=sys-libs/glibc-2.38\n"
+ in portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "dev-libs", "A-1", "RDEPEND"
+ )
+ ),
+ ),
+ # Install glibc again because earlier, no libc was installed, so the injection
+ # wouldn't have fired even if the "are we libc?" check was broken.
+ emerge_cmd
+ + (
+ "--oneshot",
+ "sys-libs/glibc",
+ ),
+ # We don't want the libc (sys-libs/glibc is the provider here) to have an injected dep on itself
+ (
+ lambda: ">=sys-libs/glibc-2.38\n"
+ not in portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "sys-libs", "glibc-2.38", "RDEPEND"
+ )
+ ),
+ ),
+ # Make sure we append to, not clobber, RDEPEND
+ emerge_cmd
+ + (
+ "--oneshot",
+ "dev-libs/E",
+ ),
+ (
+ lambda: [">=dev-libs/D-1 >=sys-libs/glibc-2.38\n"]
+ == portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "dev-libs", "E-1", "RDEPEND"
+ )
+ ),
+ ),
+ )
+
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+
+ path = settings.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX": eprefix,
+ "PATH": path,
+ "PORTAGE_PYTHON": portage_python,
+ "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH": pythonpath,
+ "PORTAGE_INST_GID": str(os.getgid()),
+ "PORTAGE_INST_UID": str(os.getuid()),
+ "FEATURES": "-qa-unresolved-soname-deps -preserve-libs -merge-sync",
+ }
+
+ dirs = [
+ playground.distdir,
+ fake_bin,
+ portage_tmpdir,
+ user_config_dir,
+ var_cache_edb,
+ ]
+
+ true_symlinks = ["chown", "chgrp"]
+
+ # We don't want to make pax-utils a hard-requirement for tests,
+ # so if it's not found, skip the test rather than FAIL it.
+ needed_binaries = {
+ "true": (find_binary("true"), True),
+ "scanelf": (find_binary("scanelf"), False),
+ "find": (find_binary("find"), True),
+ }
+
+ for name, (path, mandatory) in needed_binaries.items():
+ found = path is not None
+
+ if not found:
+ if mandatory:
+ self.assertIsNotNone(path, f"command {name} not found")
+ else:
+ self.skipTest(f"{name} not found")
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(needed_binaries["true"][0], os.path.join(fake_bin, x))
+
+ # We need scanelf, find for the ELF parts (creating NEEDED)
+ os.symlink(needed_binaries["scanelf"][0], os.path.join(fake_bin, "scanelf"))
+ os.symlink(needed_binaries["find"][0], os.path.join(fake_bin, "find"))
+
+ with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
+ f.write(b"100")
+ with open(os.path.join(profile_path, "packages"), "w") as f:
+ f.write("*virtual/libc")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+ if hasattr(args[0], "__call__"):
+ self.assertTrue(args[0](), f"callable at index {i} failed")
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args, env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(
+ os.EX_OK, proc.returncode, f"emerge failed with args {args}"
+ )
+
+ # Check that dev-libs/A doesn't get re-emerged via --changed-deps
+ # after injecting the libc dep. We want to suppress the injected
+ # dep in the changed-deps comparisons.
+ k = ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--changed-deps": True,
+ "--deep": True,
+ "--update": True,
+ "--verbose": True,
+ },
+ success=True,
+ mergelist=[],
+ )
+ playground.run_TestCase(k)
+ self.assertEqual(k.test_success, True, k.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testBinpkgLibcDepInjection(self):
+ """
+ Test whether the implicit libc dependency injection (bug #913628)
+ correctly forces an upgrade to a newer glibc before merging a binpkg
+ built against it.
+
+ Based on BlockerFileCollisionEmergeTestCase.
+ """
+ debug = False
+
+ install_elf = textwrap.dedent(
+ """
+ S="${WORKDIR}"
+
+ src_install() {
+ insinto /usr/bin
+ # We need an ELF binary for the injection to trigger, so
+ # use ${BASH} given we know it must be around for running ebuilds.
+ cp "${BASH}" "${ED}"/usr/bin/${PN} || die
+ }
+ """
+ )
+
+ ebuilds = {
+ "sys-libs/glibc-2.37": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "sys-libs/glibc-2.38": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "virtual/libc-1": {
+ "EAPI": "8",
+ "RDEPEND": "sys-libs/glibc",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ },
+ "dev-libs/C-1": {
+ "EAPI": "8",
+ "MISC_CONTENT": install_elf,
+ },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+
+ portage_python = portage._python_interpreter
+ emerge_cmd = (
+ portage_python,
+ "-b",
+ "-Wd",
+ os.path.join(str(self.bindir), "emerge"),
+ )
+
+ test_commands = (
+ # (We need sys-libs/glibc pulled in and virtual/libc installed)
+ emerge_cmd
+ + (
+ "--oneshot",
+ "virtual/libc",
+ ),
+ # A package installing an ELF binary should have an injected libc dep
+ emerge_cmd
+ + (
+ "--oneshot",
+ "dev-libs/A",
+ ),
+ (lambda: os.path.exists(os.path.join(eroot, "usr/bin/A")),),
+ (
+ lambda: ">=sys-libs/glibc-2.38\n"
+ in portage.util.grablines(
+ os.path.join(
+ eprefix, "var", "db", "pkg", "dev-libs", "A-1", "RDEPEND"
+ )
+ ),
+ ),
+ # Downgrade glibc to a version (2.37) older than the version
+ # that dev-libs/A's binpkg was built against (2.38). Below,
+ # we check that it pulls in a newer glibc via a ResolverPlayground
+ # testcase.
+ emerge_cmd
+ + (
+ "--oneshot",
+ "--nodeps",
+ "<sys-libs/glibc-2.38",
+ ),
+ )
+
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+
+ path = settings.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX": eprefix,
+ "PATH": path,
+ "PORTAGE_PYTHON": portage_python,
+ "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH": pythonpath,
+ "PORTAGE_INST_GID": str(os.getgid()),
+ "PORTAGE_INST_UID": str(os.getuid()),
+ "FEATURES": "buildpkg",
+ }
+
+ dirs = [
+ playground.distdir,
+ fake_bin,
+ portage_tmpdir,
+ user_config_dir,
+ var_cache_edb,
+ ]
+
+ true_symlinks = ["chown", "chgrp"]
+
+ # We don't want to make pax-utils a hard-requirement for tests,
+ # so if it's not found, skip the test rather than FAIL it.
+ needed_binaries = {
+ "true": (find_binary("true"), True),
+ "scanelf": (find_binary("scanelf"), False),
+ "find": (find_binary("find"), True),
+ }
+
+ for name, (path, mandatory) in needed_binaries.items():
+ found = path is not None
+
+ if not found:
+ if mandatory:
+ self.assertIsNotNone(path, f"command {name} not found")
+ else:
+ self.skipTest(f"{name} not found")
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(needed_binaries["true"][0], os.path.join(fake_bin, x))
+
+ # We need scanelf, find for the ELF parts (creating NEEDED)
+ os.symlink(needed_binaries["scanelf"][0], os.path.join(fake_bin, "scanelf"))
+ os.symlink(needed_binaries["find"][0], os.path.join(fake_bin, "find"))
+
+ with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
+ f.write(b"100")
+ with open(os.path.join(profile_path, "packages"), "w") as f:
+ f.write("*virtual/libc")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+ if hasattr(args[0], "__call__"):
+ self.assertTrue(args[0](), f"callable at index {i} failed")
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args, env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(
+ os.EX_OK, proc.returncode, f"emerge failed with args {args}"
+ )
+
+ # Now check that glibc gets upgraded to the right version
+ # for the binpkg first after we downgraded it earlier, before
+ # merging the dev-libs/A binpkg which needs 2.38.
+ k = ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options={
+ "--usepkgonly": True,
+ "--verbose": True,
+ },
+ success=True,
+ mergelist=["[binary]sys-libs/glibc-2.38-1", "[binary]dev-libs/A-1-1"],
+ )
+ playground.run_TestCase(k)
+ self.assertEqual(k.test_success, True, k.fail_msg)
+
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/env/config/test_PortageModulesFile.py b/lib/portage/tests/env/config/test_PortageModulesFile.py
index f9879df68..bca86e0e6 100644
--- a/lib/portage/tests/env/config/test_PortageModulesFile.py
+++ b/lib/portage/tests/env/config/test_PortageModulesFile.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2009 Gentoo Foundation
+# Copyright 2006-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -13,6 +13,7 @@ class PortageModulesFileTestCase(TestCase):
modules = ["spanky", "zmedico", "antarus", "ricer", "5", "6"]
def setUp(self):
+ super().setUp()
self.items = {}
for k, v in zip(self.keys + self.invalid_keys, self.modules):
self.items[k] = v
diff --git a/lib/portage/tests/glsa/test_security_set.py b/lib/portage/tests/glsa/test_security_set.py
index a0ba1e5b4..1206d9f80 100644
--- a/lib/portage/tests/glsa/test_security_set.py
+++ b/lib/portage/tests/glsa/test_security_set.py
@@ -1,4 +1,4 @@
-# Copyright 2013-2023 Gentoo Authors
+# Copyright 2013-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
@@ -226,7 +226,7 @@ class SecuritySetTestCase(TestCase):
# Give each GLSA a clean slate
for glsa in glsas:
playground = ResolverPlayground(
- ebuilds=ebuilds, installed=installed, world=world, debug=True
+ ebuilds=ebuilds, installed=installed, world=world, debug=False
)
try:
diff --git a/lib/portage/tests/gpkg/test_gpkg_gpg.py b/lib/portage/tests/gpkg/test_gpkg_gpg.py
index a2dc92150..d7eae4a82 100644
--- a/lib/portage/tests/gpkg/test_gpkg_gpg.py
+++ b/lib/portage/tests/gpkg/test_gpkg_gpg.py
@@ -1,4 +1,4 @@
-# Copyright Gentoo Foundation 2006-2020
+# Copyright 2022-2024 Gentoo Authors
# Portage Unit Testing Functionality
import io
@@ -26,6 +26,7 @@ class test_gpkg_gpg_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -68,6 +69,8 @@ class test_gpkg_gpg_case(TestCase):
InvalidSignature, binpkg_2.decompress, os.path.join(tmpdir, "test")
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -81,6 +84,7 @@ class test_gpkg_gpg_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -112,6 +116,8 @@ class test_gpkg_gpg_case(TestCase):
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -133,6 +139,7 @@ class test_gpkg_gpg_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -151,6 +158,8 @@ class test_gpkg_gpg_case(TestCase):
binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar"))
binpkg_2.decompress(os.path.join(tmpdir, "test"))
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -165,6 +174,7 @@ class test_gpkg_gpg_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -195,6 +205,8 @@ class test_gpkg_gpg_case(TestCase):
MissingSignature, binpkg_2.decompress, os.path.join(tmpdir, "test")
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -208,6 +220,7 @@ class test_gpkg_gpg_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -264,6 +277,8 @@ qGAN3VUF+8EsdcsV781H0F86PANhyBgEYTGDrnItTGe3/vAPjCo=
InvalidSignature, binpkg_2.decompress, os.path.join(tmpdir, "test")
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -285,6 +300,7 @@ qGAN3VUF+8EsdcsV781H0F86PANhyBgEYTGDrnItTGe3/vAPjCo=
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -306,6 +322,8 @@ qGAN3VUF+8EsdcsV781H0F86PANhyBgEYTGDrnItTGe3/vAPjCo=
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -319,6 +337,7 @@ qGAN3VUF+8EsdcsV781H0F86PANhyBgEYTGDrnItTGe3/vAPjCo=
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
try:
settings = playground.settings
@@ -370,5 +389,7 @@ EP1pgSXXGtlUnv6akg/wueFJKEr9KQs=
)
finally:
+ if gpg is not None:
+ gpg.stop()
shutil.rmtree(tmpdir)
playground.cleanup()
diff --git a/lib/portage/tests/gpkg/test_gpkg_metadata_update.py b/lib/portage/tests/gpkg/test_gpkg_metadata_update.py
index d2da630f3..51ad8b404 100644
--- a/lib/portage/tests/gpkg/test_gpkg_metadata_update.py
+++ b/lib/portage/tests/gpkg/test_gpkg_metadata_update.py
@@ -16,7 +16,7 @@ class test_gpkg_metadata_case(TestCase):
def test_gpkg_update_metadata(self):
playground = ResolverPlayground(
user_config={
- "make.conf": ('BINPKG_COMPRESS="gzip"',),
+ "make.conf": ('BINPKG_COMPRESS="gzip"', 'FEATURES="-binpkg-signing"'),
}
)
tmpdir = tempfile.mkdtemp()
diff --git a/lib/portage/tests/gpkg/test_gpkg_metadata_url.py b/lib/portage/tests/gpkg/test_gpkg_metadata_url.py
index 4b9f68a92..e9f411127 100644
--- a/lib/portage/tests/gpkg/test_gpkg_metadata_url.py
+++ b/lib/portage/tests/gpkg/test_gpkg_metadata_url.py
@@ -1,12 +1,12 @@
-# Copyright Gentoo Foundation 2006-2020
+# Copyright 2022-2024 Gentoo Authors
# Portage Unit Testing Functionality
import io
-import random
import tarfile
import tempfile
from functools import partial
from os import urandom
+from concurrent.futures import Future
from portage.gpkg import gpkg
from portage import os
@@ -18,7 +18,7 @@ from portage.gpg import GPG
class test_gpkg_metadata_url_case(TestCase):
- def httpd(self, directory, port):
+ def httpd(self, directory, httpd_future):
try:
import http.server
import socketserver
@@ -27,20 +27,22 @@ class test_gpkg_metadata_url_case(TestCase):
Handler = partial(http.server.SimpleHTTPRequestHandler, directory=directory)
- with socketserver.TCPServer(("127.0.0.1", port), Handler) as httpd:
+ with socketserver.TCPServer(("127.0.0.1", 0), Handler) as httpd:
+ httpd_future.set_result(httpd)
httpd.serve_forever()
- def start_http_server(self, directory, port):
+ def start_http_server(self, directory):
try:
import threading
except ImportError:
self.skipTest("threading module not exists")
+ httpd_future = Future()
server = threading.Thread(
- target=self.httpd, args=(directory, port), daemon=True
+ target=self.httpd, args=(directory, httpd_future), daemon=True
)
server.start()
- return server
+ return httpd_future.result()
def test_gpkg_get_metadata_url(self):
playground = ResolverPlayground(
@@ -53,15 +55,10 @@ class test_gpkg_metadata_url_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ server = None
try:
settings = playground.settings
- for _ in range(0, 5):
- port = random.randint(30000, 60000)
- try:
- server = self.start_http_server(tmpdir, port)
- except OSError:
- continue
- break
+ server = self.start_http_server(tmpdir)
orig_full_path = os.path.join(tmpdir, "orig/")
os.makedirs(orig_full_path)
@@ -80,11 +77,13 @@ class test_gpkg_metadata_url_case(TestCase):
test_gpkg.compress(os.path.join(tmpdir, "orig"), meta)
meta_from_url = test_gpkg.get_metadata_url(
- "http://127.0.0.1:" + str(port) + "/test.gpkg.tar"
+ "http://{}:{}/test.gpkg.tar".format(*server.server_address)
)
self.assertEqual(meta, meta_from_url)
finally:
+ if server is not None:
+ server.shutdown()
shutil.rmtree(tmpdir)
playground.cleanup()
@@ -98,18 +97,14 @@ class test_gpkg_metadata_url_case(TestCase):
}
)
tmpdir = tempfile.mkdtemp()
+ gpg = None
+ server = None
try:
settings = playground.settings
gpg = GPG(settings)
gpg.unlock()
- for _ in range(0, 5):
- port = random.randint(30000, 60000)
- try:
- server = self.start_http_server(tmpdir, port)
- except OSError:
- continue
- break
+ server = self.start_http_server(tmpdir)
orig_full_path = os.path.join(tmpdir, "orig/")
os.makedirs(orig_full_path)
@@ -153,8 +148,12 @@ IkCfAP49AOYjzuQPP0n5P0SGCINnAVEXN7QLQ4PurY/lt7cT2gEAq01stXjFhrz5
self.assertRaises(
InvalidSignature,
test_gpkg.get_metadata_url,
- "http://127.0.0.1:" + str(port) + "/test-2.gpkg.tar",
+ "http://{}:{}/test-2.gpkg.tar".format(*server.server_address),
)
finally:
+ if gpg is not None:
+ gpg.stop()
+ if server is not None:
+ server.shutdown()
shutil.rmtree(tmpdir)
playground.cleanup()
diff --git a/lib/portage/tests/gpkg/test_gpkg_path.py b/lib/portage/tests/gpkg/test_gpkg_path.py
index fc5713594..19451e2e9 100644
--- a/lib/portage/tests/gpkg/test_gpkg_path.py
+++ b/lib/portage/tests/gpkg/test_gpkg_path.py
@@ -1,4 +1,4 @@
-# Copyright Gentoo Foundation 2006
+# Copyright 2022-2024 Gentoo Authors
# Portage Unit Testing Functionality
import tempfile
@@ -308,6 +308,7 @@ class test_gpkg_path_case(TestCase):
self.assertEqual(r, ())
finally:
shutil.rmtree(tmpdir)
+ playground.cleanup()
def test_gpkg_long_filename(self):
playground = ResolverPlayground(
diff --git a/lib/portage/tests/locks/test_lock_nonblock.py b/lib/portage/tests/locks/test_lock_nonblock.py
index e3f9b4d02..d30dfe113 100644
--- a/lib/portage/tests/locks/test_lock_nonblock.py
+++ b/lib/portage/tests/locks/test_lock_nonblock.py
@@ -1,6 +1,8 @@
-# Copyright 2011-2020 Gentoo Authors
+# Copyright 2011-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import multiprocessing
+import sys
import tempfile
import traceback
@@ -17,38 +19,35 @@ class LockNonblockTestCase(TestCase):
try:
path = os.path.join(tempdir, "lock_me")
lock1 = portage.locks.lockfile(path)
- pid = os.fork()
- if pid == 0:
- portage._ForkWatcher.hook(portage._ForkWatcher)
- portage.locks._close_fds()
- # Disable close_fds since we don't exec
- # (see _setup_pipes docstring).
- portage.process._setup_pipes({0: 0, 1: 1, 2: 2}, close_fds=False)
- rval = 2
- try:
- try:
- lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
- except portage.exception.TryAgain:
- rval = os.EX_OK
- else:
- rval = 1
- portage.locks.unlockfile(lock2)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- os._exit(rval)
-
- self.assertEqual(pid > 0, True)
- pid, status = os.waitpid(pid, 0)
- self.assertEqual(os.WIFEXITED(status), True)
- self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
+ proc = multiprocessing.Process(target=self._lock_subprocess, args=(path,))
+ proc.start()
+ self.assertEqual(proc.pid > 0, True)
+ proc.join()
+ self.assertEqual(proc.exitcode, os.EX_OK)
portage.locks.unlockfile(lock1)
finally:
shutil.rmtree(tempdir)
+ @staticmethod
+ def _lock_subprocess(path):
+ portage.locks._close_fds()
+ # Disable close_fds since we don't exec
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes({0: 0, 1: 1, 2: 2}, close_fds=False)
+ rval = 2
+ try:
+ try:
+ lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
+ except portage.exception.TryAgain:
+ rval = os.EX_OK
+ else:
+ rval = 1
+ portage.locks.unlockfile(lock2)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(rval)
+
def testLockNonblock(self):
self._testLockNonblock()
diff --git a/lib/portage/tests/news/test_NewsItem.py b/lib/portage/tests/news/test_NewsItem.py
index a7903f07e..7a8393c51 100644
--- a/lib/portage/tests/news/test_NewsItem.py
+++ b/lib/portage/tests/news/test_NewsItem.py
@@ -1,5 +1,5 @@
# test_NewsItem.py -- Portage Unit Testing Functionality
-# Copyright 2007-2023 Gentoo Authors
+# Copyright 2007-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -114,6 +114,7 @@ class NewsItemTestCase(TestCase):
}
def setUp(self) -> None:
+ super().setUp()
self.profile_base = "/var/db/repos/gentoo/profiles/default-linux"
self.profile = f"{self.profile_base}/x86/2007.0/"
self.keywords = "x86"
diff --git a/lib/portage/tests/process/meson.build b/lib/portage/tests/process/meson.build
index b86fa10fb..e2b3c11d3 100644
--- a/lib/portage/tests/process/meson.build
+++ b/lib/portage/tests/process/meson.build
@@ -8,6 +8,7 @@ py.install_sources(
'test_pickle.py',
'test_poll.py',
'test_spawn_fail_e2big.py',
+ 'test_spawn_returnproc.py',
'test_spawn_warn_large_env.py',
'test_unshare_net.py',
'__init__.py',
diff --git a/lib/portage/tests/process/test_AsyncFunction.py b/lib/portage/tests/process/test_AsyncFunction.py
index 1faf8f49f..eb426a5c0 100644
--- a/lib/portage/tests/process/test_AsyncFunction.py
+++ b/lib/portage/tests/process/test_AsyncFunction.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2023 Gentoo Authors
+# Copyright 2020-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -36,26 +36,25 @@ class AsyncFunctionTestCase(TestCase):
scheduler=loop,
target=self._read_from_stdin,
args=(
- pw.fileno()
- if multiprocessing.get_start_method() == "fork"
- else None,
+ (
+ pw.fileno()
+ if multiprocessing.get_start_method() == "fork"
+ else None
+ ),
),
)
reader.start()
- # For compatibility with the multiprocessing spawn start
- # method, we delay restoration of the stdin file descriptor,
- # since this file descriptor is sent to the subprocess
- # asynchronously.
- _set_nonblocking(pw.fileno())
- with open(pw.fileno(), mode="wb", buffering=0, closefd=False) as pipe_write:
- await _writer(pipe_write, test_string.encode("utf_8"))
- pw.close()
- self.assertEqual((await reader.async_wait()), os.EX_OK)
- self.assertEqual(reader.result, test_string)
finally:
os.dup2(stdin_backup, portage._get_stdin().fileno())
os.close(stdin_backup)
+ _set_nonblocking(pw.fileno())
+ with open(pw.fileno(), mode="wb", buffering=0, closefd=False) as pipe_write:
+ await _writer(pipe_write, test_string.encode("utf_8"))
+ pw.close()
+ self.assertEqual((await reader.async_wait()), os.EX_OK)
+ self.assertEqual(reader.result, test_string)
+
def testAsyncFunctionStdin(self):
loop = asyncio._wrap_loop()
loop.run_until_complete(self._testAsyncFunctionStdin(loop=loop))
diff --git a/lib/portage/tests/process/test_spawn_fail_e2big.py b/lib/portage/tests/process/test_spawn_fail_e2big.py
index 7a0096630..abb1113fe 100644
--- a/lib/portage/tests/process/test_spawn_fail_e2big.py
+++ b/lib/portage/tests/process/test_spawn_fail_e2big.py
@@ -2,6 +2,7 @@
# Distributed under the terms of the GNU General Public License v2
import platform
+import resource
import pytest
@@ -12,7 +13,9 @@ from portage.const import BASH_BINARY
@pytest.mark.skipif(platform.system() != "Linux", reason="not Linux")
def test_spawnE2big(capsys, tmp_path):
env = dict()
- env["VERY_LARGE_ENV_VAR"] = "X" * 1024 * 256
+ # Kernel MAX_ARG_STRLEN is defined as 32 * PAGE_SIZE
+ max_arg_strlen_bytes = 32 * resource.getpagesize()
+ env["VERY_LARGE_ENV_VAR"] = "X" * max_arg_strlen_bytes
logfile = tmp_path / "logfile"
echo_output = "Should never appear"
@@ -24,7 +27,7 @@ def test_spawnE2big(capsys, tmp_path):
with open(logfile) as f:
logfile_content = f.read()
assert (
- "Largest environment variable: VERY_LARGE_ENV_VAR (262164 bytes)"
+ f"Largest environment variable: VERY_LARGE_ENV_VAR ({max_arg_strlen_bytes + 20} bytes)"
in logfile_content
)
assert retval == 1
diff --git a/lib/portage/tests/process/test_spawn_returnproc.py b/lib/portage/tests/process/test_spawn_returnproc.py
new file mode 100644
index 000000000..6d823d9c3
--- /dev/null
+++ b/lib/portage/tests/process/test_spawn_returnproc.py
@@ -0,0 +1,39 @@
+# Copyright 2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import signal
+
+from portage.process import find_binary, spawn
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+
+class SpawnReturnProcTestCase(TestCase):
+ def testSpawnReturnProcWait(self):
+ true_binary = find_binary("true")
+ self.assertNotEqual(true_binary, None)
+
+ loop = global_event_loop()
+
+ async def watch_pid():
+ proc = spawn([true_binary], returnproc=True)
+ self.assertEqual(await proc.wait(), os.EX_OK)
+
+ # A second wait should also work.
+ self.assertEqual(await proc.wait(), os.EX_OK)
+
+ loop.run_until_complete(watch_pid())
+
+ def testSpawnReturnProcTerminate(self):
+ sleep_binary = find_binary("sleep")
+ self.assertNotEqual(sleep_binary, None)
+
+ loop = global_event_loop()
+
+ async def watch_pid():
+ proc = spawn([sleep_binary, 9999], returnproc=True)
+ proc.terminate()
+ self.assertEqual(await proc.wait(), -signal.SIGTERM)
+
+ loop.run_until_complete(watch_pid())
diff --git a/lib/portage/tests/resolver/ResolverPlayground.py b/lib/portage/tests/resolver/ResolverPlayground.py
index bed3f38ac..1cb365449 100644
--- a/lib/portage/tests/resolver/ResolverPlayground.py
+++ b/lib/portage/tests/resolver/ResolverPlayground.py
@@ -1,8 +1,9 @@
-# Copyright 2010-2021 Gentoo Authors
+# Copyright 2010-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import bz2
import fnmatch
+import subprocess
import tempfile
import portage
@@ -18,8 +19,6 @@ from portage.const import (
from portage.process import find_binary
from portage.dep import Atom, _repo_separator
from portage.dbapi.bintree import binarytree
-from portage.package.ebuild.config import config
-from portage.package.ebuild.digestgen import digestgen
from portage._sets import load_default_config
from portage._sets.base import InternalPackageSet
from portage.tests import cnf_path
@@ -33,7 +32,11 @@ from _emerge.actions import _calc_depclean
from _emerge.Blocker import Blocker
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.DependencyArg import DependencyArg
-from _emerge.depgraph import backtrack_depgraph
+from _emerge.depgraph import (
+ _frozen_depgraph_config,
+ backtrack_depgraph,
+)
+from _emerge.Package import Package
from _emerge.RootConfig import RootConfig
@@ -139,6 +142,7 @@ class ResolverPlayground:
# EPREFIX/bin is used by fake true_binaries. Real binaries goes into EPREFIX/usr/bin
eubin = os.path.join(self.eprefix, "usr", "bin")
ensure_dirs(eubin)
+ os.symlink(portage._python_interpreter, os.path.join(eubin, "python"))
for x in self.portage_bin:
os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eubin, x))
@@ -262,7 +266,7 @@ class ResolverPlayground:
try:
os.makedirs(profile_path)
- except os.error:
+ except OSError:
pass
repo_name_file = os.path.join(profile_path, "repo_name")
@@ -305,7 +309,7 @@ class ResolverPlayground:
ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
try:
os.makedirs(ebuild_dir)
- except os.error:
+ except OSError:
pass
with open(ebuild_path, "w") as f:
@@ -318,22 +322,25 @@ class ResolverPlayground:
f.write(misc_content)
def _create_ebuild_manifests(self, ebuilds):
- tmpsettings = config(clone=self.settings)
- tmpsettings["PORTAGE_QUIET"] = "1"
- for cpv in ebuilds:
- a = Atom("=" + cpv, allow_repo=True)
- repo = a.repo
- if repo is None:
- repo = "test_repo"
-
- repo_dir = self._get_repo_dir(repo)
- ebuild_dir = os.path.join(repo_dir, a.cp)
- ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
-
- portdb = self.trees[self.eroot]["porttree"].dbapi
- tmpsettings["O"] = ebuild_dir
- if not digestgen(mysettings=tmpsettings, myportdb=portdb):
- raise AssertionError(f"digest creation failed for {ebuild_path}")
+ for repo_name in self._repositories:
+ if repo_name == "DEFAULT":
+ continue
+ egencache_cmd = [
+ "egencache",
+ f"--repo={repo_name}",
+ "--update",
+ "--update-manifests",
+ "--sign-manifests=n",
+ "--strict-manifests=n",
+ f"--repositories-configuration={self.settings['PORTAGE_REPOSITORIES']}",
+ f"--jobs={portage.util.cpuinfo.get_cpu_count()}",
+ ]
+ result = subprocess.run(
+ egencache_cmd,
+ env=self.settings.environ(),
+ )
+ if result.returncode != os.EX_OK:
+ raise AssertionError(f"command failed: {egencache_cmd}")
def _create_binpkgs(self, binpkgs):
# When using BUILD_ID, there can be multiple instances for the
@@ -413,7 +420,7 @@ class ResolverPlayground:
vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
try:
os.makedirs(vdb_pkg_dir)
- except os.error:
+ except OSError:
pass
metadata = installed[cpv].copy()
@@ -459,7 +466,7 @@ class ResolverPlayground:
try:
os.makedirs(user_config_dir)
- except os.error:
+ except OSError:
pass
for repo in self._repositories:
@@ -641,7 +648,7 @@ class ResolverPlayground:
try:
os.makedirs(default_sets_conf_dir)
- except os.error:
+ except OSError:
pass
provided_sets_portage_conf = os.path.join(str(cnf_path), "sets", "portage.conf")
@@ -654,7 +661,7 @@ class ResolverPlayground:
try:
os.makedirs(set_config_dir)
- except os.error:
+ except OSError:
pass
for sets_file, lines in sets.items():
@@ -685,7 +692,7 @@ class ResolverPlayground:
create_trees_kwargs["target_root"] = self.target_root
env = {
- "PATH": os.environ["PATH"],
+ "PATH": f"{self.eprefix}/usr/sbin:{self.eprefix}/usr/bin:{os.environ['PATH']}",
"PORTAGE_REPOSITORIES": "\n".join(
"[%s]\n%s"
% (
@@ -733,7 +740,16 @@ class ResolverPlayground:
portage.util.noiselimit = -2
_emerge.emergelog._disable = True
- if action in ("depclean", "prune"):
+ # NOTE: frozen_config could be cached and reused if options and params were constant.
+ params_action = (
+ "remove" if action in ("dep_check", "depclean", "prune") else action
+ )
+ params = create_depgraph_params(options, params_action)
+ frozen_config = _frozen_depgraph_config(
+ self.settings, self.trees, options, params, None
+ )
+
+ if params_action == "remove":
depclean_result = _calc_depclean(
self.settings,
self.trees,
@@ -742,6 +758,7 @@ class ResolverPlayground:
action,
InternalPackageSet(initial_atoms=atoms, allow_wildcard=True),
None,
+ frozen_config=frozen_config,
)
result = ResolverPlaygroundDepcleanResult(
atoms,
@@ -752,9 +769,15 @@ class ResolverPlayground:
depclean_result.depgraph,
)
else:
- params = create_depgraph_params(options, action)
success, depgraph, favorites = backtrack_depgraph(
- self.settings, self.trees, options, params, action, atoms, None
+ self.settings,
+ self.trees,
+ options,
+ params,
+ action,
+ atoms,
+ None,
+ frozen_config=frozen_config,
)
depgraph._show_merge_list()
depgraph.display_problems()
@@ -940,7 +963,8 @@ class ResolverPlaygroundTestCase:
)
and expected is not None
):
- expected = set(expected)
+ # unsatisfied_deps can be a dict for depclean-like actions
+ expected = expected if isinstance(expected, dict) else set(expected)
elif key == "forced_rebuilds" and expected is not None:
expected = {k: set(v) for k, v in expected.items()}
@@ -1110,11 +1134,14 @@ class ResolverPlaygroundDepcleanResult:
"ordered",
"req_pkg_count",
"graph_order",
+ "unsatisfied_deps",
)
optional_checks = (
+ "cleanlist",
"ordered",
"req_pkg_count",
"graph_order",
+ "unsatisfied_deps",
)
def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count, depgraph):
@@ -1126,3 +1153,10 @@ class ResolverPlaygroundDepcleanResult:
self.graph_order = [
_mergelist_str(node, depgraph) for node in depgraph._dynamic_config.digraph
]
+ self.unsatisfied_deps = {}
+ for dep in depgraph._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package):
+ parent_repr = dep.parent.cpv
+ else:
+ parent_repr = dep.parent.arg
+ self.unsatisfied_deps.setdefault(parent_repr, set()).add(dep.atom)
diff --git a/lib/portage/tests/resolver/meson.build b/lib/portage/tests/resolver/meson.build
index 770027ac4..8892c7813 100644
--- a/lib/portage/tests/resolver/meson.build
+++ b/lib/portage/tests/resolver/meson.build
@@ -15,12 +15,14 @@ py.install_sources(
'test_bdeps.py',
'test_binary_pkg_ebuild_visibility.py',
'test_blocker.py',
+ 'test_broken_deps.py',
'test_changed_deps.py',
'test_circular_choices.py',
'test_circular_choices_rust.py',
'test_circular_dependencies.py',
'test_complete_graph.py',
'test_complete_if_new_subslot_without_revbump.py',
+ 'test_cross_dep_priority.py',
'test_depclean.py',
'test_depclean_order.py',
'test_depclean_slot_unavailable.py',
diff --git a/lib/portage/tests/resolver/soname/test_skip_update.py b/lib/portage/tests/resolver/soname/test_skip_update.py
index 407c16a54..dc48a66f9 100644
--- a/lib/portage/tests/resolver/soname/test_skip_update.py
+++ b/lib/portage/tests/resolver/soname/test_skip_update.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2023 Gentoo Foundation
+# Copyright 2015-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -29,7 +29,10 @@ class SonameSkipUpdateTestCase(TestCase):
def testSonameSkipUpdate(self, backtrack=3):
binpkgs = {
"app-misc/A-1": {
- "RDEPEND": "dev-libs/B",
+ # Simulate injected libc dep which should not trigger
+ # reinstall due to use of strip_libc_deps in
+ # depgraph._eliminate_rebuilds dep comparison.
+ "RDEPEND": "dev-libs/B >=sys-libs/glibc-2.37",
"DEPEND": "dev-libs/B",
"REQUIRES": "x86_32: libB.so.1",
},
@@ -39,6 +42,10 @@ class SonameSkipUpdateTestCase(TestCase):
"dev-libs/B-1": {
"PROVIDES": "x86_32: libB.so.1",
},
+ "sys-libs/glibc-2.37-r7": {
+ "PROVIDES": "x86_32: libc.so.6",
+ },
+ "virtual/libc-1-r1": {"RDEPEND": "sys-libs/glibc"},
}
installed = {
@@ -50,6 +57,12 @@ class SonameSkipUpdateTestCase(TestCase):
"dev-libs/B-1": {
"PROVIDES": "x86_32: libB.so.1",
},
+ "sys-libs/glibc-2.37-r7": {
+ "PROVIDES": "x86_32: libc.so.6",
+ },
+ "virtual/libc-1-r1": {
+ "RDEPEND": "sys-libs/glibc",
+ },
}
world = ("app-misc/A",)
diff --git a/lib/portage/tests/resolver/test_broken_deps.py b/lib/portage/tests/resolver/test_broken_deps.py
new file mode 100644
index 000000000..8ca7809d3
--- /dev/null
+++ b/lib/portage/tests/resolver/test_broken_deps.py
@@ -0,0 +1,76 @@
+# Copyright 2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+
+class BrokenDepsTestCase(TestCase):
+ def testBrokenDeps(self):
+ """
+ Test the _calc_depclean "dep_check" action which will eventually
+ be used to check for unsatisfied deps of installed packages
+ for bug 921333.
+ """
+ ebuilds = {
+ "dev-qt/qtcore-5.15.12": {
+ "EAPI": "8",
+ },
+ "dev-qt/qtcore-5.15.11-r1": {
+ "EAPI": "8",
+ },
+ "dev-qt/qtxmlpatterns-5.15.12": {
+ "EAPI": "8",
+ "DEPEND": "=dev-qt/qtcore-5.15.12*",
+ "RDEPEND": "=dev-qt/qtcore-5.15.12*",
+ },
+ "dev-qt/qtxmlpatterns-5.15.11": {
+ "EAPI": "8",
+ "DEPEND": "=dev-qt/qtcore-5.15.11*",
+ "RDEPEND": "=dev-qt/qtcore-5.15.11*",
+ },
+ "kde-frameworks/syntax-highlighting-5.113.0": {
+ "EAPI": "8",
+ "DEPEND": ">=dev-qt/qtxmlpatterns-5.15.9:5",
+ },
+ }
+ installed = {
+ "dev-qt/qtcore-5.15.12": {
+ "EAPI": "8",
+ },
+ "dev-qt/qtxmlpatterns-5.15.11": {
+ "EAPI": "8",
+ "DEPEND": "=dev-qt/qtcore-5.15.11*",
+ "RDEPEND": "=dev-qt/qtcore-5.15.11*",
+ },
+ "kde-frameworks/syntax-highlighting-5.113.0": {
+ "EAPI": "8",
+ "DEPEND": ">=dev-qt/qtxmlpatterns-5.15.9:5",
+ },
+ }
+
+ world = ("kde-frameworks/syntax-highlighting",)
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ action="dep_check",
+ success=True,
+ unsatisfied_deps={
+ "dev-qt/qtxmlpatterns-5.15.11": {"=dev-qt/qtcore-5.15.11*"}
+ },
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world
+ )
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_cross_dep_priority.py b/lib/portage/tests/resolver/test_cross_dep_priority.py
new file mode 100644
index 000000000..10f2eb36e
--- /dev/null
+++ b/lib/portage/tests/resolver/test_cross_dep_priority.py
@@ -0,0 +1,164 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import subprocess
+import os
+
+import portage
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+
+class CrossDepPriorityTestCase(TestCase):
+ def testCrossDepPriority(self):
+ """
+ Test bug 919174, where cross-root merge to an empty root
+ failed due to circular dependencies.
+ """
+ ebuilds = {
+ "dev-lang/python-3.11.6": {
+ "EAPI": "8",
+ "DEPEND": "sys-apps/util-linux:=",
+ "RDEPEND": "sys-apps/util-linux:=",
+ },
+ "sys-apps/util-linux-2.38.1-r2": {
+ "EAPI": "8",
+ "DEPEND": "selinux? ( >=sys-libs/libselinux-2.2.2-r4 )",
+ "RDEPEND": "selinux? ( >=sys-libs/libselinux-2.2.2-r4 )",
+ "IUSE": "selinux",
+ },
+ "sys-libs/libselinux-3.5-r1": {
+ "EAPI": "8",
+ "DEPEND": "python? ( dev-lang/python )",
+ "RDEPEND": "python? ( dev-lang/python )",
+ "IUSE": "python",
+ },
+ "dev-libs/gmp-6.3.0": {
+ "EAPI": "8",
+ "SLOT": "0/10.4",
+ "DEPEND": "cxx? ( sys-devel/gcc )",
+ "RDEPEND": "cxx? ( sys-devel/gcc )",
+ "IUSE": "cxx",
+ },
+ "sys-devel/gcc-13.2.1_p20230826": {
+ "EAPI": "8",
+ "DEPEND": ">=dev-libs/gmp-4.3.2:0=",
+ "RDEPEND": ">=dev-libs/gmp-4.3.2:0=",
+ },
+ }
+
+ installed = {
+ "dev-lang/python-3.11.6": {
+ "EAPI": "8",
+ "KEYWORDS": "x86",
+ "DEPEND": "sys-apps/util-linux:0/0=",
+ "RDEPEND": "sys-apps/util-linux:0/0=",
+ },
+ "sys-apps/util-linux-2.38.1-r2": {
+ "EAPI": "8",
+ "KEYWORDS": "x86",
+ "DEPEND": "selinux? ( >=sys-libs/libselinux-2.2.2-r4 )",
+ "RDEPEND": "selinux? ( >=sys-libs/libselinux-2.2.2-r4 )",
+ "IUSE": "selinux",
+ "USE": "selinux",
+ },
+ "sys-libs/libselinux-3.5-r1": {
+ "EAPI": "8",
+ "KEYWORDS": "x86",
+ "DEPEND": "python? ( dev-lang/python )",
+ "RDEPEND": "python? ( dev-lang/python )",
+ "IUSE": "python",
+ "USE": "python",
+ },
+ "dev-libs/gmp-6.3.0": {
+ "EAPI": "8",
+ "KEYWORDS": "x86",
+ "SLOT": "0/10.4",
+ "DEPEND": "cxx? ( sys-devel/gcc )",
+ "RDEPEND": "cxx? ( sys-devel/gcc )",
+ "IUSE": "cxx",
+ "USE": "cxx",
+ },
+ "sys-devel/gcc-13.2.1_p20230826": {
+ "EAPI": "8",
+ "KEYWORDS": "x86",
+ "DEPEND": ">=dev-libs/gmp-4.3.2:0/10.4=",
+ "RDEPEND": ">=dev-libs/gmp-4.3.2:0/10.4=",
+ },
+ }
+
+ world = [
+ "sys-apps/util-linux",
+ "sys-devel/gcc",
+ ]
+
+ user_config = {
+ "make.conf": ('USE="cxx python selinux"',),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={"--emptytree": True},
+ success=True,
+ mergelist=[
+ "dev-libs/gmp-6.3.0",
+ "sys-devel/gcc-13.2.1_p20230826",
+ "sys-apps/util-linux-2.38.1-r2",
+ "dev-lang/python-3.11.6",
+ "sys-libs/libselinux-3.5-r1",
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds,
+ installed=installed,
+ world=world,
+ user_config=user_config,
+ )
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+
+ # Since ResolverPlayground does not internally support
+ # cross-root, test with emerge.
+ cross_root = os.path.join(playground.settings["EPREFIX"], "cross_root")
+ world_file = os.path.join(
+ cross_root,
+ playground.settings["EPREFIX"].lstrip(os.sep),
+ portage.const.WORLD_FILE,
+ )
+ os.makedirs(os.path.dirname(world_file))
+ shutil.copy(
+ os.path.join(playground.settings["EPREFIX"], portage.const.WORLD_FILE),
+ world_file,
+ )
+ result = subprocess.run(
+ [
+ "emerge",
+ f"--root={cross_root}",
+ "--pretend",
+ "--verbose",
+ "--usepkgonly",
+ "--quickpkg-direct=y",
+ "@world",
+ ],
+ env=playground.settings.environ(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ output = result.stdout.decode(errors="replace")
+ try:
+ self.assertTrue("5 packages (5 new, 5 binaries)" in output)
+ self.assertEqual(result.returncode, os.EX_OK)
+ except Exception:
+ print(output)
+ raise
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depclean_order.py b/lib/portage/tests/resolver/test_depclean_order.py
index 08dd249e1..36d60d44e 100644
--- a/lib/portage/tests/resolver/test_depclean_order.py
+++ b/lib/portage/tests/resolver/test_depclean_order.py
@@ -1,4 +1,4 @@
-# Copyright 2013 Gentoo Foundation
+# Copyright 2013-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -57,3 +57,118 @@ class SimpleDepcleanTestCase(TestCase):
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
finally:
playground.cleanup()
+
+ def testIDEPENDDepclean(self):
+ """
+ Test for bug 916135, where a direct circular dependency caused
+ the unmerge order to fail to account for IDEPEND.
+ """
+
+ ebuilds = {
+ "dev-util/A-1": {},
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-util/A-1": {},
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:0/0=",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = ("dev-libs/C",)
+
+ test_cases = (
+ # Remove dev-libs/B first because it IDEPENDs on dev-util/A
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=[
+ "dev-libs/B-1",
+ "dev-util/A-1",
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world
+ )
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testCircularDepclean(self):
+ """
+ Test for bug 916135, where an indirect circular dependency caused
+ the unmerge order to fail to account for IDEPEND.
+ """
+
+ ebuilds = {
+ "dev-util/A-1": {},
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ "SLOT": "1",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/B-2": {
+ "EAPI": "8",
+ "SLOT": "2",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-util/A-1": {},
+ "dev-libs/B-1": {
+ "EAPI": "8",
+ "SLOT": "1",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:2/2=",
+ },
+ "dev-libs/B-2": {
+ "EAPI": "8",
+ "SLOT": "2",
+ "IDEPEND": "dev-util/A",
+ "RDEPEND": "dev-libs/B:1/1=",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = ("dev-libs/C",)
+
+ test_cases = (
+ # Remove dev-libs/B first because it IDEPENDs on dev-util/A
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=["dev-libs/B-2", "dev-libs/B-1", "dev-util/A-1"],
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world
+ )
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_eapi.py b/lib/portage/tests/resolver/test_eapi.py
index 5d425ccdb..32dcb4989 100644
--- a/lib/portage/tests/resolver/test_eapi.py
+++ b/lib/portage/tests/resolver/test_eapi.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2020 Gentoo Authors
+# Copyright 2010-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -199,7 +199,7 @@ class EAPITestCase(TestCase):
mergelist=["dev-libs/A-1.0", "dev-libs/B-1.0"],
)
- playground = ResolverPlayground(ebuilds=ebuilds, debug=True)
+ playground = ResolverPlayground(ebuilds=ebuilds)
try:
playground.run_TestCase(test_case)
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
diff --git a/lib/portage/tests/sets/base/test_variable_set.py b/lib/portage/tests/sets/base/test_variable_set.py
index 506104667..9e90ee6dd 100644
--- a/lib/portage/tests/sets/base/test_variable_set.py
+++ b/lib/portage/tests/sets/base/test_variable_set.py
@@ -12,6 +12,9 @@ class VariableSetTestCase(TestCase):
def testVariableSetEmerge(self):
ebuilds = {
"dev-go/go-pkg-1": {"BDEPEND": "dev-lang/go"},
+ "www-client/firefox-1": {
+ "BDEPEND": "|| ( virtual/rust:0/a virtual/rust:0/b )"
+ },
}
installed = ebuilds
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
@@ -22,6 +25,11 @@ class VariableSetTestCase(TestCase):
mergelist=["dev-go/go-pkg-1"],
success=True,
),
+ ResolverPlaygroundTestCase(
+ ["@rust-rebuild"],
+ mergelist=["www-client/firefox-1"],
+ success=True,
+ ),
)
try:
diff --git a/lib/portage/tests/sets/files/test_config_file_set.py b/lib/portage/tests/sets/files/test_config_file_set.py
index 81419df4a..fdb28da7f 100644
--- a/lib/portage/tests/sets/files/test_config_file_set.py
+++ b/lib/portage/tests/sets/files/test_config_file_set.py
@@ -1,5 +1,5 @@
# testConfigFileSet.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -13,6 +13,7 @@ class ConfigFileSetTestCase(TestCase):
"""Simple Test Case for ConfigFileSet"""
def setUp(self):
+ super().setUp()
fd, self.testfile = tempfile.mkstemp(
suffix=".testdata", prefix=self.__class__.__name__, text=True
)
diff --git a/lib/portage/tests/sets/files/test_static_file_set.py b/lib/portage/tests/sets/files/test_static_file_set.py
index a4e6c29c2..e8f51ca20 100644
--- a/lib/portage/tests/sets/files/test_static_file_set.py
+++ b/lib/portage/tests/sets/files/test_static_file_set.py
@@ -1,5 +1,5 @@
# testStaticFileSet.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -13,6 +13,7 @@ class StaticFileSetTestCase(TestCase):
"""Simple Test Case for StaticFileSet"""
def setUp(self):
+ super().setUp()
fd, self.testfile = tempfile.mkstemp(
suffix=".testdata", prefix=self.__class__.__name__, text=True
)
diff --git a/lib/portage/tests/sets/shell/test_shell.py b/lib/portage/tests/sets/shell/test_shell.py
index f30b72a8b..15f8907df 100644
--- a/lib/portage/tests/sets/shell/test_shell.py
+++ b/lib/portage/tests/sets/shell/test_shell.py
@@ -1,5 +1,5 @@
# testCommandOututSet.py -- Portage Unit Testing Functionality
-# Copyright 2007-2020 Gentoo Authors
+# Copyright 2007-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.process import find_binary
@@ -11,7 +11,7 @@ class CommandOutputSetTestCase(TestCase):
"""Simple Test Case for CommandOutputSet"""
def setUp(self):
- pass
+ super().setUp()
def tearDown(self):
pass
diff --git a/lib/portage/tests/sync/test_sync_local.py b/lib/portage/tests/sync/test_sync_local.py
index 339d37c25..aeeb5d0b1 100644
--- a/lib/portage/tests/sync/test_sync_local.py
+++ b/lib/portage/tests/sync/test_sync_local.py
@@ -387,7 +387,7 @@ class SyncLocalTestCase(TestCase):
"GENTOO_COMMITTER_NAME": committer_name,
"GENTOO_COMMITTER_EMAIL": committer_email,
"HOME": homedir,
- "PATH": os.environ["PATH"],
+ "PATH": settings["PATH"],
"PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"],
"PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"],
"PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
diff --git a/lib/portage/tests/update/test_move_ent.py b/lib/portage/tests/update/test_move_ent.py
index 22d0c8feb..0b938dd28 100644
--- a/lib/portage/tests/update/test_move_ent.py
+++ b/lib/portage/tests/update/test_move_ent.py
@@ -1,9 +1,9 @@
-# Copyright 2012-2021 Gentoo Authors
+# Copyright 2012-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import sys
import textwrap
-
+import pytest
import portage
from portage import os
from portage.const import SUPPORTED_GENTOO_BINPKG_FORMATS
@@ -122,3 +122,204 @@ class MoveEntTestCase(TestCase):
finally:
playground.cleanup()
+
+ def testMoveEntWithSignature(self):
+ ebuilds = {
+ "dev-libs/A-2::dont_apply_updates": {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "4",
+ },
+ "dev-libs/A-2::dont_apply_updates": {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "4",
+ },
+ "dev-libs/A-2::dont_apply_updates": {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ }
+
+ updates = textwrap.dedent(
+ """
+ move dev-libs/A dev-libs/A-moved
+ """
+ )
+
+ for binpkg_format in ("gpkg",):
+ with self.subTest(binpkg_format=binpkg_format):
+ print(colorize("HILITE", binpkg_format), end=" ... ")
+ sys.stdout.flush()
+ playground = ResolverPlayground(
+ binpkgs=binpkgs,
+ ebuilds=ebuilds,
+ installed=installed,
+ user_config={
+ "make.conf": (f'BINPKG_FORMAT="{binpkg_format}"',),
+ },
+ )
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(
+ os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles",
+ "updates",
+ )
+ )
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # A -> A-moved
+ self.assertRaises(KeyError, vardb.aux_get, "dev-libs/A-1", ["EAPI"])
+ vardb.aux_get("dev-libs/A-moved-1", ["EAPI"])
+ # The original package should still exist because a binary
+ # package move is a copy on write operation.
+ bindb.aux_get("dev-libs/A-1", ["EAPI"])
+                    bindb.aux_get("dev-libs/A-1", ["PF"])
+ self.assertRaises(
+ KeyError, bindb.aux_get, "dev-libs/A-moved-1", ["EAPI"]
+ )
+
+ # dont_apply_updates
+ self.assertRaises(
+ KeyError, vardb.aux_get, "dev-libs/A-moved-2", ["EAPI"]
+ )
+ vardb.aux_get("dev-libs/A-2", ["EAPI"])
+ self.assertRaises(
+ KeyError, bindb.aux_get, "dev-libs/A-moved-2", ["EAPI"]
+ )
+ bindb.aux_get("dev-libs/A-2", ["EAPI"])
+
+ finally:
+ playground.cleanup()
+
+ # Ignore "The loop argument is deprecated" since this argument is conditionally
+ # added to asyncio.Lock as needed for compatibility with python 3.9.
+ @pytest.mark.filterwarnings("ignore:The loop argument is deprecated")
+ @pytest.mark.filterwarnings("error")
+ def testMoveEntWithCorruptIndex(self):
+ """
+ Test handling of the Packages index being stale (bug #920828)
+ and gpkg's binpkg-multi-instance handling.
+
+ We expect a UserWarning to be thrown if the gpkg structure is broken,
+ so we promote that to an error.
+ """
+ ebuilds = {
+ "dev-libs/A-moved-1::test_repo": {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ "dev-libs/B-1::test_repo": {"EAPI": "4", "RDEPEND": "dev-libs/A-moved"},
+ }
+
+ installed = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "4",
+ },
+ "dev-libs/B-1::test_repo": {"EAPI": "4", "RDEPEND": "dev-libs/A"},
+ }
+
+ binpkgs = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "4",
+ "BUILD_ID": "1",
+ },
+ "dev-libs/B-1::test_repo": {
+ "EAPI": "4",
+ "BUILD_ID": "1",
+ "RDEPEND": "dev-libs/A",
+ },
+ }
+
+ updates = textwrap.dedent(
+ """
+ move dev-libs/A dev-libs/A-moved
+ """
+ )
+
+ for binpkg_format in ("gpkg",):
+ with self.subTest(binpkg_format=binpkg_format):
+ print(colorize("HILITE", binpkg_format), end=" ... ")
+ sys.stdout.flush()
+ playground = ResolverPlayground(
+ binpkgs=binpkgs,
+ ebuilds=ebuilds,
+ installed=installed,
+ user_config={
+ "make.conf": (
+ f'BINPKG_FORMAT="{binpkg_format}"',
+                        'FEATURES="binpkg-multi-instance pkgdir-index-trusted"',
+ ),
+ },
+ debug=False,
+ )
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
+ f.write(updates)
+
+ # Make the Packages index out-of-date
+ os.remove(
+ os.path.join(
+ bindb.bintree.pkgdir, "dev-libs", "A", "A-1-1.gpkg.tar"
+ )
+ )
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/update/test_move_slot_ent.py b/lib/portage/tests/update/test_move_slot_ent.py
index 88d9802cf..62b5c3544 100644
--- a/lib/portage/tests/update/test_move_slot_ent.py
+++ b/lib/portage/tests/update/test_move_slot_ent.py
@@ -1,4 +1,4 @@
-# Copyright 2012-2019 Gentoo Authors
+# Copyright 2012-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -86,7 +86,10 @@ class MoveSlotEntTestCase(TestCase):
ebuilds=ebuilds,
installed=installed,
user_config={
- "make.conf": (f'BINPKG_FORMAT="{binpkg_format}"',),
+ "make.conf": (
+ f'BINPKG_FORMAT="{binpkg_format}"',
+ 'FEATURES="-binpkg-signing"',
+ ),
},
)
@@ -154,3 +157,153 @@ class MoveSlotEntTestCase(TestCase):
finally:
playground.cleanup()
+
+ def testMoveSlotEntWithSignature(self):
+ ebuilds = {
+ "dev-libs/A-2::dont_apply_updates": {
+ "EAPI": "5",
+ "SLOT": "0/2.30",
+ },
+ "dev-libs/B-2::dont_apply_updates": {
+ "SLOT": "0",
+ },
+ "dev-libs/C-2.1::dont_apply_updates": {
+ "EAPI": "5",
+ "SLOT": "0/2.1",
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "5",
+ "SLOT": "0/2.30",
+ },
+ "dev-libs/B-1::test_repo": {
+ "SLOT": "0",
+ },
+ "dev-libs/C-1::test_repo": {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/A-1::test_repo": {
+ "EAPI": "5",
+ "SLOT": "0/2.30",
+ },
+ "dev-libs/A-2::dont_apply_updates": {
+ "EAPI": "5",
+ "SLOT": "0/2.30",
+ },
+ "dev-libs/B-1::test_repo": {
+ "SLOT": "0",
+ },
+ "dev-libs/B-2::dont_apply_updates": {
+ "SLOT": "0",
+ },
+ "dev-libs/C-1::test_repo": {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ },
+ "dev-libs/C-2.1::dont_apply_updates": {
+ "EAPI": "5",
+ "SLOT": "0/2.1",
+ },
+ }
+
+ updates = textwrap.dedent(
+ """
+ slotmove dev-libs/A 0 2
+ slotmove dev-libs/B 0 1
+ slotmove dev-libs/C 0 1
+ """
+ )
+
+ for binpkg_format in ("gpkg",):
+ with self.subTest(binpkg_format=binpkg_format):
+ print(colorize("HILITE", binpkg_format), end=" ... ")
+ sys.stdout.flush()
+ playground = ResolverPlayground(
+ binpkgs=binpkgs,
+ ebuilds=ebuilds,
+ installed=installed,
+ user_config={
+ "make.conf": (
+ f'BINPKG_FORMAT="{binpkg_format}"',
+ 'FEATURES="binpkg-signing"',
+ ),
+ },
+ )
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(
+ os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles",
+ "updates",
+ )
+ )
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # 0/2.30 -> 2/2.30
+ self.assertEqual(
+ "2/2.30", vardb.aux_get("dev-libs/A-1", ["SLOT"])[0]
+ )
+
+ # Stale signed packages removed since a7bbb4fc4d38.
+ self.assertRaises(KeyError, bindb.aux_get, "dev-libs/A-1", ["SLOT"])
+ # self.assertEqual(
+ # "0/2.30", bindb.aux_get("dev-libs/A-1", ["SLOT"])[0]
+ # )
+
+ # 0 -> 1
+ self.assertEqual("1", vardb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+ # Stale signed packages removed since a7bbb4fc4d38.
+ self.assertRaises(KeyError, bindb.aux_get, "dev-libs/B-1", ["SLOT"])
+ # self.assertEqual("0", bindb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+
+ # 0/1 -> 1 (equivalent to 1/1)
+ self.assertEqual("1", vardb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+ # Stale signed packages removed since a7bbb4fc4d38.
+ self.assertRaises(KeyError, bindb.aux_get, "dev-libs/C-1", ["SLOT"])
+ # self.assertEqual("0/1", bindb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+
+ # dont_apply_updates
+ self.assertEqual(
+ "0/2.30", bindb.aux_get("dev-libs/A-2", ["SLOT"])[0]
+ )
+ self.assertEqual("0", bindb.aux_get("dev-libs/B-2", ["SLOT"])[0])
+ self.assertEqual(
+ "0/2.1", bindb.aux_get("dev-libs/C-2.1", ["SLOT"])[0]
+ )
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/update/test_update_dbentry.py b/lib/portage/tests/update/test_update_dbentry.py
index a473cd937..3b3f0caae 100644
--- a/lib/portage/tests/update/test_update_dbentry.py
+++ b/lib/portage/tests/update/test_update_dbentry.py
@@ -1,6 +1,7 @@
-# Copyright 2012-2013 Gentoo Foundation
+# Copyright 2012-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import shutil
import sys
import re
import textwrap
@@ -9,6 +10,7 @@ import portage
from portage import os
from portage.const import SUPPORTED_GENTOO_BINPKG_FORMATS
from portage.dep import Atom
+from portage.exception import CorruptionKeyError
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.update import update_dbentry
@@ -186,6 +188,11 @@ class UpdateDbentryTestCase(TestCase):
"EAPI": "4",
"SLOT": "2",
},
+ "dev-libs/B-2::test_repo": {
+ "SLOT": "2",
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
"dev-libs/B-1::test_repo": {
"RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
"EAPI": "4",
@@ -215,6 +222,11 @@ class UpdateDbentryTestCase(TestCase):
"RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
"EAPI": "4",
},
+ "dev-libs/B-2::test_repo": {
+ "SLOT": "2",
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
}
world = ["dev-libs/M", "dev-libs/N"]
@@ -235,7 +247,10 @@ class UpdateDbentryTestCase(TestCase):
installed=installed,
world=world,
user_config={
- "make.conf": (f'BINPKG_FORMAT="{binpkg_format}"',),
+ "make.conf": (
+ f'BINPKG_FORMAT="{binpkg_format}"',
+ 'FEATURES="-binpkg-signing"',
+ ),
},
)
@@ -266,6 +281,34 @@ class UpdateDbentryTestCase(TestCase):
)
)
+ # Delete some things in order to trigger CorruptionKeyError during package moves.
+ corruption_atom = Atom("dev-libs/B:2")
+ # Demonstrate initial state.
+ self.assertEqual(bindb.match(corruption_atom), ["dev-libs/B-2"])
+ for cpv in bindb.match(corruption_atom):
+ os.unlink(bindb.bintree.getname(cpv))
+ self.assertRaises(
+ CorruptionKeyError,
+ bindb.aux_update,
+ cpv,
+ {"RDEPEND": "dev-libs/M-moved"},
+ )
+ # Demonstrate corrupt state.
+ self.assertEqual(bindb.match(corruption_atom), ["dev-libs/B-2"])
+
+ # Demonstrate initial state.
+ self.assertEqual(vardb.match(corruption_atom), ["dev-libs/B-2"])
+ for cpv in vardb.match(corruption_atom):
+ shutil.rmtree(vardb.getpath(cpv))
+ self.assertRaises(
+ CorruptionKeyError,
+ vardb.aux_update,
+ cpv,
+ {"RDEPEND": "dev-libs/M-moved"},
+ )
+ # Demonstrate correct state because vardbapi checks the disk.
+ self.assertEqual(vardb.match(corruption_atom), [])
+
global_noiselimit = portage.util.noiselimit
portage.util.noiselimit = -2
try:
@@ -301,6 +344,176 @@ class UpdateDbentryTestCase(TestCase):
self.assertTrue("dev-libs/M" in rdepend)
self.assertTrue("dev-libs/M-moved" not in rdepend)
+ # Demonstrate that match still returns stale results
+ # due to intentional corruption.
+ self.assertEqual(bindb.match(corruption_atom), ["dev-libs/B-2"])
+
+ # Update bintree state so aux_get will properly raise KeyError.
+ for cpv in bindb.match(corruption_atom):
+ # Demonstrate that aux_get returns stale results.
+ self.assertEqual(
+ ["dev-libs/M dev-libs/N dev-libs/P"],
+ bindb.aux_get(cpv, ["RDEPEND"]),
+ )
+ bindb.bintree.remove(cpv)
+ self.assertEqual(bindb.match(corruption_atom), [])
+ self.assertRaises(
+ KeyError, bindb.aux_get, "dev-libs/B-2", ["RDEPEND"]
+ )
+ self.assertRaises(
+ KeyError, vardb.aux_get, "dev-libs/B-2", ["RDEPEND"]
+ )
+
+ selected_set.load()
+ self.assertTrue("dev-libs/M" not in selected_set)
+ self.assertTrue("dev-libs/M-moved" in selected_set)
+
+ finally:
+ playground.cleanup()
+
+ def testUpdateDbentryDbapiTestCaseWithSignature(self):
+ ebuilds = {
+ "dev-libs/A-2::dont_apply_updates": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ "dev-libs/B-2::dont_apply_updates": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1::test_repo": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+ "dev-libs/A-2::dont_apply_updates": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ "dev-libs/B-1::test_repo": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+ "dev-libs/M-1::test_repo": {
+ "EAPI": "4",
+ },
+ "dev-libs/N-1::test_repo": {
+ "EAPI": "4",
+ },
+ "dev-libs/N-2::test_repo": {
+ "EAPI": "4",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/A-1::test_repo": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+ "dev-libs/A-2::dont_apply_updates": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+ "dev-libs/B-1::test_repo": {
+ "RDEPEND": "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+ }
+
+ world = ["dev-libs/M", "dev-libs/N"]
+
+ updates = textwrap.dedent(
+ """
+ move dev-libs/M dev-libs/M-moved
+ """
+ )
+
+ for binpkg_format in ("gpkg",):
+ with self.subTest(binpkg_format=binpkg_format):
+ print(colorize("HILITE", binpkg_format), end=" ... ")
+ sys.stdout.flush()
+ playground = ResolverPlayground(
+ binpkgs=binpkgs,
+ ebuilds=ebuilds,
+ installed=installed,
+ world=world,
+ user_config={
+ "make.conf": (f'BINPKG_FORMAT="{binpkg_format}"',),
+ },
+ )
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ setconfig = trees[eroot]["root_config"].setconfig
+ selected_set = setconfig.getSets()["selected"]
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(
+ os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles",
+ "updates",
+ )
+ )
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # M -> M-moved
+ old_pattern = re.compile(r"\bdev-libs/M(\s|$)")
+ rdepend = vardb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ # Stale signed packages removed since a7bbb4fc4d38.
+ self.assertRaises(
+ KeyError, bindb.aux_get, "dev-libs/A-1", ["RDEPEND"]
+ )
+ # rdepend = bindb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ # self.assertFalse(old_pattern.search(rdepend) is None)
+ # self.assertFalse("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+
+ # dont_apply_updates
+ rdepend = vardb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+
selected_set.load()
self.assertTrue("dev-libs/M" not in selected_set)
self.assertTrue("dev-libs/M-moved" in selected_set)
diff --git a/lib/portage/tests/util/dyn_libs/meson.build b/lib/portage/tests/util/dyn_libs/meson.build
index ddb08f5b1..8f2c919c1 100644
--- a/lib/portage/tests/util/dyn_libs/meson.build
+++ b/lib/portage/tests/util/dyn_libs/meson.build
@@ -1,5 +1,6 @@
py.install_sources(
[
+ 'test_installed_dynlibs.py',
'test_soname_deps.py',
'__init__.py',
'__test__.py',
diff --git a/lib/portage/tests/util/dyn_libs/test_installed_dynlibs.py b/lib/portage/tests/util/dyn_libs/test_installed_dynlibs.py
new file mode 100644
index 000000000..421dcf606
--- /dev/null
+++ b/lib/portage/tests/util/dyn_libs/test_installed_dynlibs.py
@@ -0,0 +1,65 @@
+# Copyright 2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import tempfile
+
+from portage.const import BASH_BINARY
+from portage.tests import TestCase
+from portage.util import ensure_dirs
+from portage.util._dyn_libs.dyn_libs import installed_dynlibs
+from portage.util.file_copy import copyfile
+
+
+class InstalledDynlibsTestCase(TestCase):
+ def testInstalledDynlibsRegular(self):
+ """
+ Return True for *.so regular files.
+ """
+ with tempfile.TemporaryDirectory() as directory:
+ bash_copy = os.path.join(directory, "lib", "libfoo.so")
+ ensure_dirs(os.path.dirname(bash_copy))
+ copyfile(BASH_BINARY, bash_copy)
+ self.assertTrue(installed_dynlibs(directory))
+
+ def testInstalledDynlibsOnlySymlink(self):
+ """
+ If a *.so symlink is installed but does not point to a regular
+ file inside the top directory, installed_dynlibs should return
+ False (bug 921170).
+ """
+ with tempfile.TemporaryDirectory() as directory:
+ symlink_path = os.path.join(directory, "lib", "libfoo.so")
+ ensure_dirs(os.path.dirname(symlink_path))
+ os.symlink(BASH_BINARY, symlink_path)
+ self.assertFalse(installed_dynlibs(directory))
+
+ def testInstalledDynlibsSymlink(self):
+ """
+ Return True for a *.so symlink pointing to a regular file inside
+ the top directory.
+ """
+ with tempfile.TemporaryDirectory() as directory:
+ bash_copy = os.path.join(directory, BASH_BINARY.lstrip(os.sep))
+ ensure_dirs(os.path.dirname(bash_copy))
+ copyfile(BASH_BINARY, bash_copy)
+ symlink_path = os.path.join(directory, "lib", "libfoo.so")
+ ensure_dirs(os.path.dirname(symlink_path))
+ os.symlink(bash_copy, symlink_path)
+ self.assertTrue(installed_dynlibs(directory))
+
+ def testInstalledDynlibsAbsoluteSymlink(self):
+ """
+ If a *.so symlink target is outside of the top directory,
+ traversal follows the corresponding file inside the top
+ directory if it exists, and otherwise stops following the
+ symlink.
+ """
+ with tempfile.TemporaryDirectory() as directory:
+ bash_copy = os.path.join(directory, BASH_BINARY.lstrip(os.sep))
+ ensure_dirs(os.path.dirname(bash_copy))
+ copyfile(BASH_BINARY, bash_copy)
+ symlink_path = os.path.join(directory, "lib", "libfoo.so")
+ ensure_dirs(os.path.dirname(symlink_path))
+ os.symlink(BASH_BINARY, symlink_path)
+ self.assertTrue(installed_dynlibs(directory))
diff --git a/lib/portage/tests/util/futures/asyncio/meson.build b/lib/portage/tests/util/futures/asyncio/meson.build
index ba727052f..2de0668d6 100644
--- a/lib/portage/tests/util/futures/asyncio/meson.build
+++ b/lib/portage/tests/util/futures/asyncio/meson.build
@@ -1,6 +1,5 @@
py.install_sources(
[
- 'test_child_watcher.py',
'test_event_loop_in_fork.py',
'test_pipe_closed.py',
'test_policy_wrapper_recursion.py',
diff --git a/lib/portage/tests/util/futures/asyncio/test_child_watcher.py b/lib/portage/tests/util/futures/asyncio/test_child_watcher.py
deleted file mode 100644
index cd100598b..000000000
--- a/lib/portage/tests/util/futures/asyncio/test_child_watcher.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2018-2021 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-import os
-
-from portage.process import find_binary, spawn
-from portage.tests import TestCase
-from portage.util._eventloop.global_event_loop import global_event_loop
-from portage.util.futures import asyncio
-from portage.util.futures.unix_events import DefaultEventLoopPolicy
-
-
-class ChildWatcherTestCase(TestCase):
- def testChildWatcher(self):
- true_binary = find_binary("true")
- self.assertNotEqual(true_binary, None)
-
- initial_policy = asyncio.get_event_loop_policy()
- if not isinstance(initial_policy, DefaultEventLoopPolicy):
- asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
-
- loop = None
- try:
- try:
- asyncio.set_child_watcher(None)
- except NotImplementedError:
- pass
- else:
- self.assertTrue(False)
-
- args_tuple = ("hello", "world")
-
- loop = asyncio._wrap_loop()
- future = loop.create_future()
-
- def callback(pid, returncode, *args):
- future.set_result((pid, returncode, args))
-
- async def watch_pid():
- with asyncio.get_child_watcher() as watcher:
- pids = spawn([true_binary], returnpid=True)
- watcher.add_child_handler(pids[0], callback, *args_tuple)
- self.assertEqual((await future), (pids[0], os.EX_OK, args_tuple))
-
- loop.run_until_complete(watch_pid())
- finally:
- asyncio.set_event_loop_policy(initial_policy)
- if loop not in (None, global_event_loop()):
- loop.close()
- self.assertFalse(global_event_loop().is_closed())
diff --git a/lib/portage/tests/util/futures/test_retry.py b/lib/portage/tests/util/futures/test_retry.py
index a5b56bdc7..2197f6697 100644
--- a/lib/portage/tests/util/futures/test_retry.py
+++ b/lib/portage/tests/util/futures/test_retry.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2023 Gentoo Authors
+# Copyright 2018-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from concurrent.futures import Future, ThreadPoolExecutor
@@ -9,7 +9,6 @@ import threading
import weakref
import time
-import portage
from portage.tests import TestCase
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.util.backoff import RandomExponentialBackoff
@@ -222,6 +221,7 @@ class RetryForkExecutorTestCase(RetryTestCase):
self._executor = None
def setUp(self):
+ super().setUp()
self._setUpExecutor()
def tearDown(self):
@@ -229,16 +229,19 @@ class RetryForkExecutorTestCase(RetryTestCase):
@contextlib.contextmanager
def _wrap_coroutine_func(self, coroutine_func):
+ uses_subprocess = isinstance(self._executor, ForkExecutor)
parent_loop = global_event_loop()
- parent_pid = portage.getpid()
pending = weakref.WeakValueDictionary()
# Since ThreadPoolExecutor does not propagate cancellation of a
# parent_future to the underlying coroutine, use kill_switch to
# propagate task cancellation to wrapper, so that HangForever's
# thread returns when retry eventually cancels parent_future.
- def wrapper(kill_switch):
- if portage.getpid() == parent_pid:
+ if uses_subprocess:
+ wrapper = _run_coroutine_in_subprocess(coroutine_func)
+ else:
+
+ def wrapper(kill_switch):
# thread in main process
def done_callback(result):
result.cancelled() or result.exception() or result.result()
@@ -262,22 +265,19 @@ class RetryForkExecutorTestCase(RetryTestCase):
else:
return future.result().result()
- # child process
- loop = global_event_loop()
- try:
- return loop.run_until_complete(coroutine_func())
- finally:
- loop.close()
-
def execute_wrapper():
- kill_switch = threading.Event()
+ # Use kill_switch for threads because they can't be killed
+ # like processes. Do not pass kill_switch to subprocesses
+ # because it is not picklable.
+ kill_switch = None if uses_subprocess else threading.Event()
+ wrapper_args = [kill_switch] if kill_switch else []
parent_future = asyncio.ensure_future(
- parent_loop.run_in_executor(self._executor, wrapper, kill_switch),
+ parent_loop.run_in_executor(self._executor, wrapper, *wrapper_args),
loop=parent_loop,
)
def kill_callback(parent_future):
- if not kill_switch.is_set():
+ if kill_switch is not None and not kill_switch.is_set():
kill_switch.set()
parent_future.add_done_callback(kill_callback)
@@ -298,6 +298,19 @@ class RetryForkExecutorTestCase(RetryTestCase):
future.cancelled() or future.exception() or future.result()
+class _run_coroutine_in_subprocess:
+ def __init__(self, coroutine_func):
+ self._coroutine_func = coroutine_func
+
+ def __call__(self):
+ # child process
+ loop = global_event_loop()
+ try:
+ return loop.run_until_complete(self._coroutine_func())
+ finally:
+ loop.close()
+
+
class RetryThreadExecutorTestCase(RetryForkExecutorTestCase):
def _setUpExecutor(self):
self._executor = ThreadPoolExecutor(max_workers=1)
diff --git a/lib/portage/tests/util/test_manifest.py b/lib/portage/tests/util/test_manifest.py
index 49bcbc1a5..2d41b9fc9 100644
--- a/lib/portage/tests/util/test_manifest.py
+++ b/lib/portage/tests/util/test_manifest.py
@@ -1,6 +1,7 @@
-# Copyright 2022 Gentoo Authors
+# Copyright 2022-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import shutil
import tempfile
from pathlib import Path
@@ -10,7 +11,8 @@ from portage.tests import TestCase
class ManifestTestCase(TestCase):
def test_simple_addFile(self):
- tempdir = Path(tempfile.mkdtemp()) / "app-portage" / "diffball"
+ base_tempdir = tempfile.mkdtemp()
+ tempdir = Path(base_tempdir) / "app-portage" / "diffball"
manifest = Manifest(str(tempdir), required_hashes=["SHA512", "BLAKE2B"])
(tempdir / "files").mkdir(parents=True)
@@ -29,3 +31,4 @@ class ManifestTestCase(TestCase):
manifest.getFileData("AUX", "test.patch", "SHA512"),
"e30d069dcf284cbcb2d5685f03ca362469026b469dec4f8655d0c9a2bf317f5d9f68f61855ea403f4959bc0b9c003ae824fb9d6ab2472a739950623523af9da9",
)
+ shutil.rmtree(base_tempdir)
diff --git a/lib/portage/util/_async/AsyncTaskFuture.py b/lib/portage/util/_async/AsyncTaskFuture.py
index 0cd034c97..4c2f7a571 100644
--- a/lib/portage/util/_async/AsyncTaskFuture.py
+++ b/lib/portage/util/_async/AsyncTaskFuture.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2021 Gentoo Foundation
+# Copyright 2018-2023 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import os
@@ -20,6 +20,12 @@ class AsyncTaskFuture(AsynchronousTask):
self.future = asyncio.ensure_future(self.future, self.scheduler)
self.future.add_done_callback(self._done_callback)
+ def isAlive(self):
+ """
+ Returns True if self.future is an asyncio.Future that is not done.
+ """
+ return isinstance(self.future, asyncio.Future) and not self.future.done()
+
def _cancel(self):
if not self.future.done():
self.future.cancel()
diff --git a/lib/portage/util/_async/BuildLogger.py b/lib/portage/util/_async/BuildLogger.py
index 502b3390e..0cfc90a94 100644
--- a/lib/portage/util/_async/BuildLogger.py
+++ b/lib/portage/util/_async/BuildLogger.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Gentoo Authors
+# Copyright 2020-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import functools
@@ -6,13 +6,46 @@ import subprocess
from _emerge.AsynchronousTask import AsynchronousTask
+import portage
from portage import os
+from portage.proxy.objectproxy import ObjectProxy
from portage.util import shlex_split
from portage.util._async.PipeLogger import PipeLogger
from portage.util._async.PopenProcess import PopenProcess
from portage.util.futures import asyncio
+class _file_close_wrapper(ObjectProxy):
+ """
+ Prevent fd inheritance via fork, ensuring that we can observe
+ EOF on the read end of the pipe (bug 919072).
+ """
+
+ __slots__ = ("_file",)
+
+ def __init__(self, file):
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, "_file", file)
+ portage.locks._open_fds[file.fileno()] = self
+
+ def _get_target(self):
+ return object.__getattribute__(self, "_file")
+
+ def __getattribute__(self, attr):
+ if attr == "close":
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, "_file"), attr)
+
+ def close(self):
+ file = object.__getattribute__(self, "_file")
+ if not file.closed:
+ # This must only be called if the file is open,
+ # which ensures that file.fileno() does not
+ # collide with an open lock file descriptor.
+ del portage.locks._open_fds[file.fileno()]
+ file.close()
+
+
class BuildLogger(AsynchronousTask):
"""
Write to a log file, with compression support provided by PipeLogger.
@@ -67,7 +100,7 @@ class BuildLogger(AsynchronousTask):
os.close(log_input)
os.close(filter_output)
else:
- self._stdin = os.fdopen(stdin, "wb", 0)
+ self._stdin = _file_close_wrapper(os.fdopen(stdin, "wb", 0))
os.close(filter_input)
os.close(filter_output)
@@ -76,7 +109,7 @@ class BuildLogger(AsynchronousTask):
# that is missing or broken somehow, create a pipe that
# logs directly to pipe_logger.
log_input, stdin = os.pipe()
- self._stdin = os.fdopen(stdin, "wb", 0)
+ self._stdin = _file_close_wrapper(os.fdopen(stdin, "wb", 0))
# Set background=True so that pipe_logger does not log to stdout.
pipe_logger = PipeLogger(
diff --git a/lib/portage/util/_async/ForkProcess.py b/lib/portage/util/_async/ForkProcess.py
index 780545be0..ebcbd9410 100644
--- a/lib/portage/util/_async/ForkProcess.py
+++ b/lib/portage/util/_async/ForkProcess.py
@@ -1,13 +1,14 @@
-# Copyright 2012-2023 Gentoo Authors
+# Copyright 2012-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import fcntl
-import functools
import multiprocessing
import warnings
import signal
import sys
+from typing import Optional
+
import portage
from portage import os
from portage.cache.mappings import slot_dict_class
@@ -23,17 +24,13 @@ class ForkProcess(SpawnProcess):
"kwargs",
"target",
"_child_connection",
- "_proc",
- "_proc_join_task",
+ # Duplicate file descriptors for use by _send_fd_pipes background thread.
+ "_fd_pipes",
)
_file_names = ("connection", "slave_fd")
_files_dict = slot_dict_class(_file_names, prefix="")
- # Number of seconds between poll attempts for process exit status
- # (after the sentinel has become ready).
- _proc_join_interval = 0.1
-
_HAVE_SEND_HANDLE = getattr(multiprocessing.reduction, "HAVE_SEND_HANDLE", False)
def _start(self):
@@ -58,9 +55,14 @@ class ForkProcess(SpawnProcess):
duplex=self._HAVE_SEND_HANDLE
)
- retval = self._spawn(self.args, fd_pipes=self.fd_pipes)
+ # Handle fd_pipes in _main instead, since file descriptors are
+ # not inherited with the multiprocessing "spawn" start method.
+ # Pass fd_pipes=None to spawn here so that it doesn't leave
+ # a closed stdin duplicate in fd_pipes (that would trigger
+ # "Bad file descriptor" error if we tried to send it via
+ # send_handle).
+ self._proc = self._spawn(self.args, fd_pipes=None)
- self.pid = retval[0]
self._registered = True
if self._child_connection is None:
@@ -73,13 +75,49 @@ class ForkProcess(SpawnProcess):
self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
self.fd_pipes.setdefault(1, sys.__stdout__.fileno())
self.fd_pipes.setdefault(2, sys.__stderr__.fileno())
- stdout_fd = os.dup(self.fd_pipes[1])
+ if self.create_pipe is not False:
+ stdout_fd = os.dup(self.fd_pipes[1])
if self._HAVE_SEND_HANDLE:
- master_fd, slave_fd = self._pipe(self.fd_pipes)
- self.fd_pipes[1] = slave_fd
- self.fd_pipes[2] = slave_fd
+ if self.create_pipe is not False:
+ master_fd, slave_fd = self._pipe(self.fd_pipes)
+ self.fd_pipes[1] = slave_fd
+ self.fd_pipes[2] = slave_fd
+ else:
+ if self.logfile:
+ raise NotImplementedError(
+ "logfile conflicts with create_pipe=False"
+ )
+ # When called via process.spawn, SpawnProcess
+ # will have created a pipe earlier, so it would be
+ # redundant to do it here (it could also trigger spawn
+ # recursion via set_term_size as in bug 923750). Use
+ # /dev/null for master_fd, triggering early return
+ # of _main, followed by _async_waitpid.
+ # TODO: Optimize away the need for master_fd here.
+ master_fd = os.open(os.devnull, os.O_RDONLY)
+ slave_fd = None
+
self._files = self._files_dict(connection=connection, slave_fd=slave_fd)
+
+ # Create duplicate file descriptors in self._fd_pipes
+ # so that the caller is free to manage the lifecycle
+ # of the original fd_pipes.
+ self._fd_pipes = {}
+ fd_map = {}
+ for dest, src in list(self.fd_pipes.items()):
+ if src not in fd_map:
+ src_new = fd_map[src] = os.dup(src)
+ old_fdflags = fcntl.fcntl(src, fcntl.F_GETFD)
+ fcntl.fcntl(src_new, fcntl.F_SETFD, old_fdflags)
+ os.set_inheritable(
+ src_new, not bool(old_fdflags & fcntl.FD_CLOEXEC)
+ )
+ self._fd_pipes[dest] = fd_map[src]
+
+ asyncio.ensure_future(
+ self._proc.wait(), self.scheduler
+ ).add_done_callback(self._close_fd_pipes)
else:
master_fd = connection
@@ -87,6 +125,19 @@ class ForkProcess(SpawnProcess):
master_fd, log_file_path=self.logfile, stdout_fd=stdout_fd
)
+ def _close_fd_pipes(self, future):
+ """
+ Cleanup self._fd_pipes if needed, since _send_fd_pipes could
+ have been cancelled.
+ """
+ # future.result() raises asyncio.CancelledError if
+ # future.cancelled(), but that should not happen.
+ future.result()
+ if self._fd_pipes is not None:
+ for fd in set(self._fd_pipes.values()):
+ os.close(fd)
+ self._fd_pipes = None
+
@property
def _fd_pipes_send_handle(self):
"""Returns True if we have a connection to implement fd_pipes via send_handle."""
@@ -101,16 +152,30 @@ class ForkProcess(SpawnProcess):
Communicate with _bootstrap to send fd_pipes via send_handle.
This performs blocking IO, intended for invocation via run_in_executor.
"""
- fd_list = list(set(self.fd_pipes.values()))
- self._files.connection.send(
- (self.fd_pipes, fd_list),
- )
- for fd in fd_list:
- multiprocessing.reduction.send_handle(
- self._files.connection,
- fd,
- self.pid,
+ fd_list = list(set(self._fd_pipes.values()))
+ try:
+ self._files.connection.send(
+ (self._fd_pipes, fd_list),
)
+ for fd in fd_list:
+ multiprocessing.reduction.send_handle(
+ self._files.connection,
+ fd,
+ self.pid,
+ )
+ except BrokenPipeError as e:
+ # This case is triggered by testAsynchronousLockWaitCancel
+ # when the test case terminates the child process while
+ # this thread is still sending the fd_pipes (bug 923852).
+ # Even if the child terminated abnormally, then there is
+ # no harm in suppressing the exception here, since the
+ # child error should have gone to stderr.
+ raise asyncio.CancelledError from e
+
+ # self._fd_pipes contains duplicates that must be closed.
+ for fd in fd_list:
+ os.close(fd)
+ self._fd_pipes = None
async def _main(self, build_logger, pipe_logger, loop=None):
try:
@@ -128,12 +193,15 @@ class ForkProcess(SpawnProcess):
self._files.connection.close()
del self._files.connection
if hasattr(self._files, "slave_fd"):
- os.close(self._files.slave_fd)
+ if self._files.slave_fd is not None:
+ os.close(self._files.slave_fd)
del self._files.slave_fd
await super()._main(build_logger, pipe_logger, loop=loop)
- def _spawn(self, args, fd_pipes=None, **kwargs):
+ def _spawn(
+ self, args: list[str], fd_pipes: Optional[dict[int, int]] = None, **kwargs
+ ) -> portage.process.MultiprocessingProcess:
"""
Override SpawnProcess._spawn to fork a subprocess that calls
self._run(). This uses multiprocessing.Process in order to leverage
@@ -171,11 +239,7 @@ class ForkProcess(SpawnProcess):
)
fd_pipes[0] = stdin_dup
- if self._fd_pipes_send_handle:
- # Handle fd_pipes in _main instead.
- fd_pipes = None
-
- self._proc = multiprocessing.Process(
+ proc = multiprocessing.Process(
target=self._bootstrap,
args=(
self._child_connection,
@@ -186,19 +250,12 @@ class ForkProcess(SpawnProcess):
kwargs,
),
)
- self._proc.start()
+ proc.start()
finally:
if stdin_dup is not None:
os.close(stdin_dup)
- self._proc_join_task = asyncio.ensure_future(
- self._proc_join(self._proc, loop=self.scheduler), loop=self.scheduler
- )
- self._proc_join_task.add_done_callback(
- functools.partial(self._proc_join_done, self._proc)
- )
-
- return [self._proc.pid]
+ return portage.process.MultiprocessingProcess(proc)
def _cancel(self):
if self._proc is None:
@@ -206,64 +263,10 @@ class ForkProcess(SpawnProcess):
else:
self._proc.terminate()
- def _async_wait(self):
- if self._proc_join_task is None:
- super()._async_wait()
-
- def _async_waitpid(self):
- if self._proc_join_task is None:
- super()._async_waitpid()
-
- async def _proc_join(self, proc, loop=None):
- sentinel_reader = self.scheduler.create_future()
- self.scheduler.add_reader(
- proc.sentinel,
- lambda: sentinel_reader.done() or sentinel_reader.set_result(None),
- )
- try:
- await sentinel_reader
- finally:
- # If multiprocessing.Process supports the close method, then
- # access to proc.sentinel will raise ValueError if the
- # sentinel has been closed. In this case it's not safe to call
- # remove_reader, since the file descriptor may have been closed
- # and then reallocated to a concurrent coroutine. When the
- # close method is not supported, proc.sentinel remains open
- # until proc's finalizer is called.
- try:
- self.scheduler.remove_reader(proc.sentinel)
- except ValueError:
- pass
-
- # Now that proc.sentinel is ready, poll until process exit
- # status has become available.
- while True:
- proc.join(0)
- if proc.exitcode is not None:
- break
- await asyncio.sleep(self._proc_join_interval, loop=loop)
-
- def _proc_join_done(self, proc, future):
- future.cancelled() or future.result()
- self._was_cancelled()
- if self.returncode is None:
- self.returncode = proc.exitcode
-
- self._proc = None
- if hasattr(proc, "close"):
- proc.close()
- self._proc_join_task = None
- self._async_wait()
-
def _unregister(self):
super()._unregister()
if self._proc is not None:
- if self._proc.is_alive():
- self._proc.terminate()
- self._proc = None
- if self._proc_join_task is not None:
- self._proc_join_task.cancel()
- self._proc_join_task = None
+ self._proc.terminate()
@staticmethod
def _bootstrap(child_connection, have_send_handle, fd_pipes, target, args, kwargs):
diff --git a/lib/portage/util/_async/PipeLogger.py b/lib/portage/util/_async/PipeLogger.py
index e5cabaa62..5f3c83227 100644
--- a/lib/portage/util/_async/PipeLogger.py
+++ b/lib/portage/util/_async/PipeLogger.py
@@ -14,7 +14,6 @@ from _emerge.AbstractPollTask import AbstractPollTask
class PipeLogger(AbstractPollTask):
-
"""
This can be used for logging output of a child process,
optionally outputting to log_file_path and/or stdout_fd. It can
diff --git a/lib/portage/util/_async/PopenProcess.py b/lib/portage/util/_async/PopenProcess.py
index c9bca1c52..a0e532e27 100644
--- a/lib/portage/util/_async/PopenProcess.py
+++ b/lib/portage/util/_async/PopenProcess.py
@@ -1,6 +1,7 @@
-# Copyright 2012-2021 Gentoo Authors
+# Copyright 2012-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import portage
from _emerge.SubProcess import SubProcess
@@ -11,7 +12,7 @@ class PopenProcess(SubProcess):
)
def _start(self):
- self.pid = self.proc.pid
+ self._proc = portage.process.Process(self.proc.pid)
self._registered = True
if self.pipe_reader is None:
diff --git a/lib/portage/util/_async/TaskScheduler.py b/lib/portage/util/_async/TaskScheduler.py
index 09920111e..ec97a84ac 100644
--- a/lib/portage/util/_async/TaskScheduler.py
+++ b/lib/portage/util/_async/TaskScheduler.py
@@ -5,7 +5,6 @@ from .AsyncScheduler import AsyncScheduler
class TaskScheduler(AsyncScheduler):
-
"""
A simple way to handle scheduling of AbstractPollTask instances. Simply
pass a task iterator into the constructor and call start(). Use the
diff --git a/lib/portage/util/_dyn_libs/LinkageMapELF.py b/lib/portage/util/_dyn_libs/LinkageMapELF.py
index 76feadcac..67ed16ccb 100644
--- a/lib/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/lib/portage/util/_dyn_libs/LinkageMapELF.py
@@ -52,7 +52,6 @@ _approx_multilib_categories = {
class LinkageMapELF:
-
"""Models dynamic linker dependencies."""
_needed_aux_key = "NEEDED.ELF.2"
@@ -107,7 +106,6 @@ class LinkageMapELF:
return key
class _ObjectKey:
-
"""Helper class used as _obj_properties keys for objects."""
__slots__ = ("_key",)
@@ -515,7 +513,6 @@ class LinkageMapELF:
os = _os_merge
class _LibraryCache:
-
"""
Caches properties associated with paths.
diff --git a/lib/portage/util/_dyn_libs/dyn_libs.py b/lib/portage/util/_dyn_libs/dyn_libs.py
index ee28e8839..6f8a07d70 100644
--- a/lib/portage/util/_dyn_libs/dyn_libs.py
+++ b/lib/portage/util/_dyn_libs/dyn_libs.py
@@ -1,14 +1,51 @@
-# Copyright 2021 Gentoo Authors
+# Copyright 2021-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import os
+import stat
+
+import portage
def installed_dynlibs(directory):
- for _dirpath, _dirnames, filenames in os.walk(directory):
+ """
+ This traverses installed *.so symlinks to check if they point to
+ regular files. If a symlink target is outside of the top directory,
+ traversal follows the corresponding file inside the top directory
+ if it exists, and otherwise stops following the symlink.
+ """
+ directory_prefix = f"{directory.rstrip(os.sep)}{os.sep}"
+ for parent, _dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.endswith(".so"):
- return True
+ filename_abs = os.path.join(parent, filename)
+ target = filename_abs
+ levels = 0
+ while True:
+ try:
+ st = os.lstat(target)
+ except OSError:
+ break
+ if stat.S_ISREG(st.st_mode):
+ return True
+ elif stat.S_ISLNK(st.st_mode):
+ levels += 1
+ if levels == 40:
+ portage.writemsg(
+ f"too many levels of symbolic links: {filename_abs}\n",
+ noiselevel=-1,
+ )
+ break
+ target = portage.abssymlink(target)
+ if not target.startswith(directory_prefix):
+ # If target is outside the top directory, then follow the
+ # corresponding file inside the top directory if it exists,
+ # and otherwise stop following.
+ target = os.path.join(
+ directory_prefix, target.lstrip(os.sep)
+ )
+ else:
+ break
return False
diff --git a/lib/portage/util/elf/constants.py b/lib/portage/util/elf/constants.py
index 022e78d77..9216a3535 100644
--- a/lib/portage/util/elf/constants.py
+++ b/lib/portage/util/elf/constants.py
@@ -31,12 +31,17 @@ EM_S390 = 22
EM_ARM = 40
EM_SH = 42
EM_SPARCV9 = 43
+EM_ARC = 45
EM_IA_64 = 50
EM_X86_64 = 62
+EM_ARC_COMPACT = 93
EM_ALTERA_NIOS2 = 113
EM_AARCH64 = 183
+EM_ARC_COMPACT2 = 195
EM_AMDGPU = 224
EM_RISCV = 243
+EM_ARC_COMPACT3_64 = 253
+EM_ARC_COMPACT3 = 255
EM_LOONGARCH = 258
EM_ALPHA = 0x9026
diff --git a/lib/portage/util/file_copy/__init__.py b/lib/portage/util/file_copy/__init__.py
index 2961853d3..f88d4d9d5 100644
--- a/lib/portage/util/file_copy/__init__.py
+++ b/lib/portage/util/file_copy/__init__.py
@@ -23,9 +23,10 @@ def _optimized_copyfile(src, dst):
@param dst: path of destination file
@type dst: str
"""
- with open(src, "rb", buffering=0) as src_file, open(
- dst, "wb", buffering=0
- ) as dst_file:
+ with (
+ open(src, "rb", buffering=0) as src_file,
+ open(dst, "wb", buffering=0) as dst_file,
+ ):
_file_copy(src_file.fileno(), dst_file.fileno())
diff --git a/lib/portage/util/futures/_asyncio/__init__.py b/lib/portage/util/futures/_asyncio/__init__.py
index a5a6cb3a5..22241f335 100644
--- a/lib/portage/util/futures/_asyncio/__init__.py
+++ b/lib/portage/util/futures/_asyncio/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2021 Gentoo Authors
+# Copyright 2018-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -9,17 +9,22 @@ __all__ = (
"CancelledError",
"Future",
"InvalidStateError",
+ "Lock",
"TimeoutError",
"get_child_watcher",
"get_event_loop",
"set_child_watcher",
"get_event_loop_policy",
"set_event_loop_policy",
+ "run",
+ "shield",
"sleep",
"Task",
"wait",
+ "wait_for",
)
+import sys
import types
import weakref
@@ -33,7 +38,10 @@ from asyncio import (
FIRST_EXCEPTION,
Future,
InvalidStateError,
+ Lock as _Lock,
+ shield,
TimeoutError,
+ wait_for,
)
import threading
@@ -102,6 +110,14 @@ def set_child_watcher(watcher):
return get_event_loop_policy().set_child_watcher(watcher)
+# Emulate run since it's the preferred python API.
+def run(coro):
+ return _safe_loop().run_until_complete(coro)
+
+
+run.__doc__ = _real_asyncio.run.__doc__
+
+
def create_subprocess_exec(*args, **kwargs):
"""
Create a subprocess.
@@ -155,6 +171,20 @@ def iscoroutinefunction(func):
return False
+class Lock(_Lock):
+ """
+ Inject loop parameter for python3.9 or less in order to avoid
+ "got Future <Future pending> attached to a different loop" errors.
+ """
+
+ def __init__(self, **kwargs):
+ if sys.version_info >= (3, 10):
+ kwargs.pop("loop", None)
+ elif "loop" not in kwargs:
+ kwargs["loop"] = _safe_loop()._loop
+ super().__init__(**kwargs)
+
+
class Task(Future):
"""
Schedule the execution of a coroutine: wrap it in a future. A task
diff --git a/lib/portage/util/futures/_sync_decorator.py b/lib/portage/util/futures/_sync_decorator.py
index 772983bc8..436e7c346 100644
--- a/lib/portage/util/futures/_sync_decorator.py
+++ b/lib/portage/util/futures/_sync_decorator.py
@@ -39,9 +39,11 @@ def _sync_methods(obj, loop=None):
loop = asyncio._wrap_loop(loop)
return _ObjectAttrWrapper(
obj,
- lambda attr: _sync_decorator(attr, loop=loop)
- if asyncio.iscoroutinefunction(attr)
- else attr,
+ lambda attr: (
+ _sync_decorator(attr, loop=loop)
+ if asyncio.iscoroutinefunction(attr)
+ else attr
+ ),
)
diff --git a/lib/portage/util/futures/executor/fork.py b/lib/portage/util/futures/executor/fork.py
index 61ad6aecf..1e3d01072 100644
--- a/lib/portage/util/futures/executor/fork.py
+++ b/lib/portage/util/futures/executor/fork.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Gentoo Foundation
+# Copyright 2018-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ("ForkExecutor",)
@@ -40,7 +40,9 @@ class ForkExecutor:
"""
future = self._loop.create_future()
proc = AsyncFunction(
- target=functools.partial(self._guarded_fn_call, fn, args, kwargs)
+ target=functools.partial(self._guarded_fn_call, fn, args, kwargs),
+ # Directly inherit stdio streams and run in the foreground with no log.
+ create_pipe=False,
)
self._submit_queue.append((future, proc))
self._schedule()
diff --git a/lib/portage/util/locale.py b/lib/portage/util/locale.py
index 0d0c12015..b6a41e765 100644
--- a/lib/portage/util/locale.py
+++ b/lib/portage/util/locale.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2020 Gentoo Authors
+# Copyright 2015-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
"""
@@ -9,13 +9,15 @@ locale.
import locale
import logging
-import os
+import multiprocessing
+import sys
import textwrap
import traceback
import portage
from portage.util import _unicode_decode, writemsg_level
from portage.util._ctypes import find_library, LoadLibrary
+from portage.util.futures import asyncio
locale_categories = (
@@ -96,13 +98,34 @@ def _check_locale(silent):
return True
+def _set_and_check_locale(silent, env, mylocale):
+ try:
+ if env is not None:
+ try:
+ locale.setlocale(locale.LC_CTYPE, mylocale)
+ except locale.Error:
+ sys.exit(2)
+
+ ret = _check_locale(silent)
+ if ret is None:
+ sys.exit(2)
+ else:
+ sys.exit(0 if ret else 1)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(2)
+
+
def check_locale(silent=False, env=None):
"""
Check whether the locale is sane. Returns True if it is, prints
warning and returns False if it is not. Returns None if the check
can not be executed due to platform limitations.
"""
+ return asyncio.run(async_check_locale(silent=silent, env=env))
+
+async def async_check_locale(silent=False, env=None):
if env is not None:
for v in ("LC_ALL", "LC_CTYPE", "LANG"):
if v in env:
@@ -116,30 +139,17 @@ def check_locale(silent=False, env=None):
except KeyError:
pass
- pid = os.fork()
- if pid == 0:
- portage._ForkWatcher.hook(portage._ForkWatcher)
- try:
- if env is not None:
- try:
- locale.setlocale(locale.LC_CTYPE, portage._native_string(mylocale))
- except locale.Error:
- os._exit(2)
-
- ret = _check_locale(silent)
- if ret is None:
- os._exit(2)
- else:
- os._exit(0 if ret else 1)
- except Exception:
- traceback.print_exc()
- os._exit(2)
-
- pid2, ret = os.waitpid(pid, 0)
- assert pid == pid2
+ proc = multiprocessing.Process(
+ target=_set_and_check_locale,
+ args=(silent, env, None if env is None else portage._native_string(mylocale)),
+ )
+ proc.start()
+ proc = portage.process.MultiprocessingProcess(proc)
+ await proc.wait()
+
pyret = None
- if os.WIFEXITED(ret):
- ret = os.WEXITSTATUS(ret)
+ if proc.returncode >= 0:
+ ret = proc.returncode
if ret != 2:
pyret = ret == 0
@@ -148,13 +158,22 @@ def check_locale(silent=False, env=None):
return pyret
+async_check_locale.__doc__ = check_locale.__doc__
+async_check_locale.__doc__ += """
+ This function is a coroutine.
+"""
+
+
def split_LC_ALL(env):
"""
Replace LC_ALL with split-up LC_* variables if it is defined.
Works on the passed environment (or settings instance).
"""
lc_all = env.get("LC_ALL")
- if lc_all is not None:
+ if lc_all:
for c in locale_categories:
env[c] = lc_all
- del env["LC_ALL"]
+ # Set empty so that config.reset() can restore LC_ALL state,
+ # since del can permanently delete variables which are not
+ # stored in the config's backupenv.
+ env["LC_ALL"] = ""
diff --git a/lib/portage/util/socks5.py b/lib/portage/util/socks5.py
index fedb8599d..6c68ff410 100644
--- a/lib/portage/util/socks5.py
+++ b/lib/portage/util/socks5.py
@@ -1,10 +1,9 @@
# SOCKSv5 proxy manager for network-sandbox
-# Copyright 2015-2021 Gentoo Authors
+# Copyright 2015-2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import errno
import os
-import signal
import socket
import portage.data
@@ -22,7 +21,8 @@ class ProxyManager:
def __init__(self):
self.socket_path = None
- self._pids = []
+ self._proc = None
+ self._proc_waiter = None
def start(self, settings):
"""
@@ -51,9 +51,9 @@ class ProxyManager:
spawn_kwargs.update(
uid=portage_uid, gid=portage_gid, groups=userpriv_groups, umask=0o077
)
- self._pids = spawn(
+ self._proc = spawn(
[_python_interpreter, server_bin, self.socket_path],
- returnpid=True,
+ returnproc=True,
**spawn_kwargs,
)
@@ -61,12 +61,19 @@ class ProxyManager:
"""
Stop the SOCKSv5 server.
"""
- for p in self._pids:
- os.kill(p, signal.SIGINT)
- os.waitpid(p, 0)
+ if self._proc is not None:
+ self._proc.terminate()
+ loop = asyncio.get_event_loop()
+ if self._proc_waiter is None:
+ self._proc_waiter = asyncio.ensure_future(self._proc.wait(), loop)
+ if loop.is_running():
+ self._proc_waiter.add_done_callback(lambda future: future.result())
+ else:
+ loop.run_until_complete(self._proc_waiter)
self.socket_path = None
- self._pids = []
+ self._proc = None
+ self._proc_waiter = None
def is_running(self):
"""
@@ -80,16 +87,11 @@ class ProxyManager:
"""
Wait for the proxy socket to become ready. This method is a coroutine.
"""
+ if self._proc_waiter is None:
+ self._proc_waiter = asyncio.ensure_future(self._proc.wait())
while True:
- try:
- wait_retval = os.waitpid(self._pids[0], os.WNOHANG)
- except OSError as e:
- if e.errno == errno.EINTR:
- continue
- raise
-
- if wait_retval is not None and wait_retval != (0, 0):
+ if self._proc_waiter.done():
raise OSError(3, "No such process")
try:
diff --git a/man/ebuild.5 b/man/ebuild.5
index d8622702f..687c4f9d8 100644
--- a/man/ebuild.5
+++ b/man/ebuild.5
@@ -754,6 +754,9 @@ see the \fBQA CONTROL VARIABLES\fR section for more specific exemptions.
.I bindist
Distribution of built packages is restricted.
.TP
+.I dedupdebug
+Disables dedupdebug for specific packages.
+.TP
.I fetch
like \fImirror\fR but the files will not be fetched via \fBSRC_URI\fR either.
.TP
@@ -808,6 +811,11 @@ is installed.
The package manager may run tests that require an internet connection, even if
the ebuild has
.IR RESTRICT=test .
+.TP
+.I test_privileged
+The package manager may run tests that require superuser permissions, even if
+the ebuild has
+.IR RESTRICT=test .
.RE
.PD 1
.TP
diff --git a/man/emerge.1 b/man/emerge.1
index 866efe1e7..609c9ac99 100644
--- a/man/emerge.1
+++ b/man/emerge.1
@@ -1,4 +1,4 @@
-.TH "EMERGE" "1" "Mar 2023" "Portage @VERSION@" "Portage"
+.TH "EMERGE" "1" "Jan 2024" "Portage @VERSION@" "Portage"
.SH "NAME"
emerge \- Command\-line interface to the Portage system
.SH "SYNOPSIS"
@@ -747,7 +747,8 @@ file to always be merged.
.TP
.BR \-\-nodeps ", " \-O
Merges specified packages without merging any dependencies. Note that
-the build may fail if the dependencies aren't satisfied.
+the build may fail if the dependencies aren't satisfied. This option
+implies \fB--backtrack=0\fR.
.TP
.BR \-\-noreplace ", " \-n
Skips the packages specified on the command\-line that have already
diff --git a/man/make.conf.5 b/man/make.conf.5
index 7872e3a6f..cfd843455 100644
--- a/man/make.conf.5
+++ b/man/make.conf.5
@@ -1,4 +1,4 @@
-.TH "MAKE.CONF" "5" "Mar 2023" "Portage @VERSION@" "Portage"
+.TH "MAKE.CONF" "5" "Jan 2024" "Portage @VERSION@" "Portage"
.SH "NAME"
make.conf \- custom settings for Portage
.SH "SYNOPSIS"
@@ -118,7 +118,7 @@ values are \fIxpak\fI or \fIgpkg\fI. If \fBBINPKG_FORMAT\fR is set to \fIxpak\f
packages will have the file extension .tbz2 for any compression type for historical
reasons. If \fBBINPKG_FORMAT\fR is set to \fIxpak\fR, and FEATURES="binpkg-multi-instance"
is set, packages will have the file extension .xpak. If \fBBINPKG_FORMAT\fR is set
-to \fIgpkg\fR, packages will have the file extension .gpkg.
+to \fIgpkg\fR, packages will have the file extension .gpkg.tar.
.TP
\fBBINPKG_COMPRESS\fR = \fI"compression"\fR
This variable is used to determine the compression used for \fIbinary
@@ -468,13 +468,19 @@ redundant on\-the\-fly compression. The resulting file will be called
.B compressdebug
Compress the debug sections in the split debug files with zlib to save
space. See \fBsplitdebug\fR for general split debug
-information (upon which this feature depends).
+information (upon which this feature depends). See also \fBdedupdebug\fR
+for further debug info size reduction.
.TP
.B config\-protect\-if\-modified
This causes the \fBCONFIG_PROTECT\fR behavior to be skipped for files
that have not been modified since they were installed. This feature is
enabled by default.
.TP
+.B dedupdebug
+Prior to the debugging info being split and compressed, they are
+deduplicated. This feature works only if dwz is installed, and is also
+disabled by \fBnostrip\fR.
+.TP
.B digest
Autogenerate digests for packages when running the
\fBemerge\fR(1) or \fBebuild\fR(1) commands. If the
@@ -568,6 +574,15 @@ After a package is merged or unmerged, sync relevant files to
disk in order to avoid data\-loss in the event of a power failure.
This feature is enabled by default.
.TP
+.B merge\-wait
+Wait for all builds to complete before merging new packages, which only
+matters when using the \fBemerge\fR(1) \fB\-\-jobs\fR option. This
+feature is always effectively enabled and it cannot be disabled for
+packages that satisfy direct or indirect dependencies of the system
+set. For other packages, it can be disabled in order to trade the
+possibility of random build failures for greater parallelism.
+This feature is enabled by default.
+.TP
.B metadata\-transfer
Automatically perform a metadata transfer when `emerge \-\-sync` is run.
In versions of portage >=2.1.5, this feature is disabled by
@@ -640,8 +655,9 @@ terminal to view parallel-fetch progress.
.TP
.B parallel\-install
Use finer\-grained locks when installing packages, allowing for greater
-parallelization. For additional parallelization, disable
-\fIebuild\-locks\fR.
+parallelism. Note that \fIparallel\-install\fR currently has no effect
+unless \fImerge\-wait\fR is disabled. For additional parallelism,
+disable \fIebuild\-locks\fR.
.TP
.B pid\-sandbox
Isolate the process space for the ebuild processes. This makes it
@@ -1313,6 +1329,10 @@ Run tests in packages specifying \fBPROPERTIES\fR="\fBtest_network\fR". Note
that this will most likely cause Internet access during the test suite which
could cause additional costs, privacy concerns and intermittent test failures.
.TP
+.B privileged
+Run tests in packages specifying \fBPROPERTIES\fR="\fBtest_privileged\fR". Note
+that this will cause the test suite to be run with superuser permissions.
+.TP
.RE
.TP
.B RESUMECOMMAND
diff --git a/meson.build b/meson.build
index 425e6310f..aeb3f93e6 100644
--- a/meson.build
+++ b/meson.build
@@ -1,7 +1,7 @@
project(
'portage',
'c',
- version : '3.0.56',
+ version : '3.0.62',
license : 'GPL-2.0-or-later',
meson_version : '>=0.58.0'
)
diff --git a/misc/emerge-delta-webrsync b/misc/emerge-delta-webrsync
index 49c8f606e..a788cdb0e 100755
--- a/misc/emerge-delta-webrsync
+++ b/misc/emerge-delta-webrsync
@@ -553,6 +553,7 @@ sync_local() {
chown -R ${ownership} "${TMPDIR}"
rsync_opts+=" --owner --group"
fi
+ cd "${TMPDIR}" || die "failed to change directory to TMPDIR"
chmod 755 .
rsync ${rsync_opts} . "${repo_location%%/}"
cd "${DISTDIR}"
diff --git a/tox.ini b/tox.ini
index dcae3e87b..23c533c34 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py{39,310,311,312}-{pylint,test},pypy3-test
+envlist = py{39,310,311,312,313}-{pylint,test},pypy3-test
skipsdist = True
[gh-actions]
@@ -8,6 +8,7 @@ python =
3.10: py310
3.11: py311
3.12: py312
+ 3.13: py313
pypy-3: pypy3
[gh-actions:env]