Diffstat (limited to 'sci-libs')
-rw-r--r--  sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-numpy-1.24.patch         |  22
-rw-r--r--  sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-scikit-learn-1.2.0.patch | 104
-rw-r--r--  sci-libs/scikit-optimize/scikit-optimize-0.9.0-r1.ebuild                      |  39
3 files changed, 165 insertions, 0 deletions
diff --git a/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-numpy-1.24.patch b/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-numpy-1.24.patch
new file mode 100644
index 000000000000..65fc26f3eed1
--- /dev/null
+++ b/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-numpy-1.24.patch
@@ -0,0 +1,22 @@
+diff --git a/skopt/space/transformers.py b/skopt/space/transformers.py
+index 68892952..87cc3b68 100644
+--- a/skopt/space/transformers.py
++++ b/skopt/space/transformers.py
+@@ -259,7 +259,7 @@ def transform(self, X):
+ if (self.high - self.low) == 0.:
+ return X * 0.
+ if self.is_int:
+- return (np.round(X).astype(np.int) - self.low) /\
++ return (np.round(X).astype(np.int64) - self.low) /\
+ (self.high - self.low)
+ else:
+ return (X - self.low) / (self.high - self.low)
+@@ -272,7 +272,7 @@ def inverse_transform(self, X):
+ raise ValueError("All values should be greater than 0.0")
+ X_orig = X * (self.high - self.low) + self.low
+ if self.is_int:
+- return np.round(X_orig).astype(np.int)
++ return np.round(X_orig).astype(np.int64)
+ return X_orig
+
+
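
Context for the patch above: np.int was deprecated in NumPy 1.20 and removed in
NumPy 1.24, so the unpatched transformer raises AttributeError at runtime. A
minimal sketch of the failure and the fix, assuming numpy >= 1.24 (the array
values are illustrative, not from the patch):

import numpy as np

X = np.array([1.2, 2.7, 3.5])

# Unpatched spelling; on numpy >= 1.24 this raises
# AttributeError: module 'numpy' has no attribute 'int'
#rounded = np.round(X).astype(np.int)

# Patched spelling: an explicit fixed-width dtype.
rounded = np.round(X).astype(np.int64)
print(rounded)  # -> [1 3 4]
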
diff --git a/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-scikit-learn-1.2.0.patch b/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-scikit-learn-1.2.0.patch
new file mode 100644
index 000000000000..8cf8cff9479f
--- /dev/null
+++ b/sci-libs/scikit-optimize/files/scikit-optimize-0.9.0-scikit-learn-1.2.0.patch
@@ -0,0 +1,104 @@
+diff --git a/skopt/learning/forest.py b/skopt/learning/forest.py
+index 096770c1d..ebde568f5 100644
+--- a/skopt/learning/forest.py
++++ b/skopt/learning/forest.py
+@@ -27,7 +27,7 @@ def _return_std(X, trees, predictions, min_variance):
+ -------
+ std : array-like, shape=(n_samples,)
+ Standard deviation of `y` at `X`. If criterion
+- is set to "mse", then `std[i] ~= std(y | X[i])`.
++ is set to "squared_error", then `std[i] ~= std(y | X[i])`.
+
+ """
+ # This derives std(y | x) as described in 4.3.2 of arXiv:1211.0906
+@@ -61,9 +61,9 @@ class RandomForestRegressor(_sk_RandomForestRegressor):
+ n_estimators : integer, optional (default=10)
+ The number of trees in the forest.
+
+- criterion : string, optional (default="mse")
++ criterion : string, optional (default="squared_error")
+ The function to measure the quality of a split. Supported criteria
+- are "mse" for the mean squared error, which is equal to variance
++ are "squared_error" for the mean squared error, which is equal to variance
+ reduction as feature selection criterion, and "mae" for the mean
+ absolute error.
+
+@@ -194,7 +194,7 @@ class RandomForestRegressor(_sk_RandomForestRegressor):
+ .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
+
+ """
+- def __init__(self, n_estimators=10, criterion='mse', max_depth=None,
++ def __init__(self, n_estimators=10, criterion='squared_error', max_depth=None,
+ min_samples_split=2, min_samples_leaf=1,
+ min_weight_fraction_leaf=0.0, max_features='auto',
+ max_leaf_nodes=None, min_impurity_decrease=0.,
+@@ -228,20 +228,20 @@ def predict(self, X, return_std=False):
+ Returns
+ -------
+ predictions : array-like of shape = (n_samples,)
+- Predicted values for X. If criterion is set to "mse",
++ Predicted values for X. If criterion is set to "squared_error",
+ then `predictions[i] ~= mean(y | X[i])`.
+
+ std : array-like of shape=(n_samples,)
+ Standard deviation of `y` at `X`. If criterion
+- is set to "mse", then `std[i] ~= std(y | X[i])`.
++ is set to "squared_error", then `std[i] ~= std(y | X[i])`.
+
+ """
+ mean = super(RandomForestRegressor, self).predict(X)
+
+ if return_std:
+- if self.criterion != "mse":
++ if self.criterion != "squared_error":
+ raise ValueError(
+- "Expected impurity to be 'mse', got %s instead"
++ "Expected impurity to be 'squared_error', got %s instead"
+ % self.criterion)
+ std = _return_std(X, self.estimators_, mean, self.min_variance)
+ return mean, std
+@@ -257,9 +257,9 @@ class ExtraTreesRegressor(_sk_ExtraTreesRegressor):
+ n_estimators : integer, optional (default=10)
+ The number of trees in the forest.
+
+- criterion : string, optional (default="mse")
++ criterion : string, optional (default="squared_error")
+ The function to measure the quality of a split. Supported criteria
+- are "mse" for the mean squared error, which is equal to variance
++ are "squared_error" for the mean squared error, which is equal to variance
+ reduction as feature selection criterion, and "mae" for the mean
+ absolute error.
+
+@@ -390,7 +390,7 @@ class ExtraTreesRegressor(_sk_ExtraTreesRegressor):
+ .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
+
+ """
+- def __init__(self, n_estimators=10, criterion='mse', max_depth=None,
++ def __init__(self, n_estimators=10, criterion='squared_error', max_depth=None,
+ min_samples_split=2, min_samples_leaf=1,
+ min_weight_fraction_leaf=0.0, max_features='auto',
+ max_leaf_nodes=None, min_impurity_decrease=0.,
+@@ -425,19 +425,19 @@ def predict(self, X, return_std=False):
+ Returns
+ -------
+ predictions : array-like of shape=(n_samples,)
+- Predicted values for X. If criterion is set to "mse",
++ Predicted values for X. If criterion is set to "squared_error",
+ then `predictions[i] ~= mean(y | X[i])`.
+
+ std : array-like of shape=(n_samples,)
+ Standard deviation of `y` at `X`. If criterion
+- is set to "mse", then `std[i] ~= std(y | X[i])`.
++ is set to "squared_error", then `std[i] ~= std(y | X[i])`.
+ """
+ mean = super(ExtraTreesRegressor, self).predict(X)
+
+ if return_std:
+- if self.criterion != "mse":
++ if self.criterion != "squared_error":
+ raise ValueError(
+- "Expected impurity to be 'mse', got %s instead"
++ "Expected impurity to be 'squared_error', got %s instead"
+ % self.criterion)
+ std = _return_std(X, self.estimators_, mean, self.min_variance)
+ return mean, std
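
Background for the rename tracked by this patch: scikit-learn 1.0 deprecated
the regressor split criterion "mse" in favor of "squared_error", and
scikit-learn 1.2 removed the old name, so the constructor defaults and the
return_std guards above must follow. A minimal sketch, assuming
scikit-learn >= 1.2 (the toy data is illustrative):

from sklearn.ensemble import RandomForestRegressor

# Old spelling; rejected during parameter validation on scikit-learn >= 1.2:
#RandomForestRegressor(criterion="mse").fit([[0.0], [1.0]], [0.0, 1.0])

# Renamed criterion; selects the same mean-squared-error split quality.
reg = RandomForestRegressor(n_estimators=10, criterion="squared_error")
reg.fit([[0.0], [1.0]], [0.0, 1.0])
print(reg.predict([[0.5]]))
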
diff --git a/sci-libs/scikit-optimize/scikit-optimize-0.9.0-r1.ebuild b/sci-libs/scikit-optimize/scikit-optimize-0.9.0-r1.ebuild
new file mode 100644
index 000000000000..694cd3ffafeb
--- /dev/null
+++ b/sci-libs/scikit-optimize/scikit-optimize-0.9.0-r1.ebuild
@@ -0,0 +1,39 @@
+# Copyright 2020-2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+DISTUTILS_USE_PEP517=setuptools
+PYPI_NO_NORMALIZE=1
+PYTHON_COMPAT=( python3_{10..11} )
+inherit distutils-r1 pypi
+
+DESCRIPTION="Sequential model-based optimization library"
+HOMEPAGE="https://scikit-optimize.github.io/"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+
+RDEPEND="
+ >=dev-python/joblib-0.11[${PYTHON_USEDEP}]
+ dev-python/pyyaml[${PYTHON_USEDEP}]
+ >=dev-python/matplotlib-2.0.0[${PYTHON_USEDEP}]
+ >=dev-python/numpy-1.13.3[${PYTHON_USEDEP}]
+ >=dev-python/scipy-0.19.1[${PYTHON_USEDEP}]
+ >=sci-libs/scikit-learn-0.20.0[${PYTHON_USEDEP}]
+"
+
+PATCHES=(
+ # https://github.com/scikit-optimize/scikit-optimize/pull/1187
+ "${FILESDIR}/${P}-numpy-1.24.patch"
+ # https://github.com/scikit-optimize/scikit-optimize/pull/1184/files
+ "${FILESDIR}/${P}-scikit-learn-1.2.0.patch"
+)
+
+distutils_enable_tests pytest
+# No such file or directory: image/logo.png
+#distutils_enable_sphinx doc \
+# dev-python/numpydoc \
+# dev-python/sphinx-issues \
+# dev-python/sphinx-gallery
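
A hypothetical post-merge smoke test (not part of the ebuild; the real test
suite runs through distutils_enable_tests pytest above). Round-tripping an
Integer dimension through the normalize transform exercises the code path
fixed by the numpy-1.24 patch; the bounds and sample points are illustrative:

from skopt.space import Integer

# transform="normalize" routes through skopt.space.transformers.Normalize,
# i.e. the astype(np.int64) lines patched above.
dim = Integer(1, 10, transform="normalize")
xt = dim.transform([3, 7])        # scaled into [0, 1]
print(dim.inverse_transform(xt))  # expected: [3, 7]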