Skip to content

Commit

Permalink
Merge branch 'master' into cleanup-predict
Browse files Browse the repository at this point in the history
  • Loading branch information
trivialfis authored Jan 7, 2025
2 parents 25d1d7c + a475327 commit d32fdb8
Show file tree
Hide file tree
Showing 10 changed files with 45 additions and 12 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/r_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,8 @@ jobs:
uses: actions/cache@v4
with:
path: ${{ env.R_LIBS_USER }}
key: ${{ runner.os }}-r-${{ matrix.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.r }}-7-${{ hashFiles('R-package/DESCRIPTION') }}
key: ${{ runner.os }}-r-${{ matrix.r }}-8-${{ hashFiles('R-package/DESCRIPTION') }}
restore-keys: ${{ runner.os }}-r-${{ matrix.r }}-8-${{ hashFiles('R-package/DESCRIPTION') }}
- uses: actions/setup-python@v5
with:
python-version: "3.10"
Expand Down
3 changes: 2 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -497,7 +497,8 @@ if(GOOGLE_TEST)
configure_file(
${xgboost_SOURCE_DIR}/tests/cli/machine.conf.in
${xgboost_BINARY_DIR}/tests/cli/machine.conf
@ONLY)
@ONLY
NEWLINE_STYLE UNIX)
if(BUILD_DEPRECATED_CLI)
add_test(
NAME TestXGBoostCLI
Expand Down
6 changes: 3 additions & 3 deletions R-package/R/xgb.train.R
Original file line number Diff line number Diff line change
Expand Up @@ -459,10 +459,10 @@ xgb.train <- function(params = xgb.params(), data, nrounds, evals = list(),
#' @param seed Random number seed. If not specified, will take a random seed through R's own RNG engine.
#' @param booster (default= `"gbtree"`)
#' Which booster to use. Can be `"gbtree"`, `"gblinear"` or `"dart"`; `"gbtree"` and `"dart"` use tree based models while `"gblinear"` uses linear functions.
#' @param eta,learning_rate (two aliases for the same parameter) (default=0.3)
#' @param eta,learning_rate (two aliases for the same parameter)
#' Step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and `eta` shrinks the feature weights to make the boosting process more conservative.
#'
#' range: \eqn{[0,1]}
#' - range: \eqn{[0,1]}
#' - default value: 0.3 for tree-based boosters, 0.5 for linear booster.
#'
#' Note: should only pass one of `eta` or `learning_rate`. Both refer to the same parameter and there's thus no difference between one or the other.
#' @param gamma,min_split_loss (two aliases for the same parameter) (for Tree Booster) (default=0, alias: `gamma`)
Expand Down
21 changes: 21 additions & 0 deletions R-package/configure.win
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# R-package configure script for Windows (run at package install time —
# presumably by R CMD INSTALL, per R's configure.win convention; see caller).
# Purpose: probe whether the C++ toolchain provides the _mm_prefetch
# intrinsic from <xmmintrin.h>, then generate src/Makevars.win from the
# src/Makevars.win.in template with the detection result substituted in.

# Locate the R executable shipped with this R installation and ask it which
# C++ compiler R itself is configured to use, so the probe matches the
# compiler that will build the package sources.
R_EXE="${R_HOME}/bin${R_ARCH_BIN}/R.exe"
CXX=`"${R_EXE}" CMD config CXX`

# Emit a minimal probe program exercising _mm_prefetch. The heredoc body is
# written to test.cpp verbatim, so no comments may appear inside it.
cat > test.cpp <<EOL
#include <xmmintrin.h>
int main() {
char data = 0;
const char* address = &data;
_mm_prefetch(address, _MM_HINT_NTA);
return 0;
}
EOL

# Default to "no define". Only if the probe both compiles (errors discarded)
# and runs successfully do we set the preprocessor flag that Makevars.win.in
# expects. NOTE(review): the probe binary is executed on the build host, so
# this assumes a native (non-cross) build — the usual case for configure.win.
XGBOOST_MM_PREFETCH_PRESENT=""
${CXX} -o test test.cpp 2>/dev/null && ./test && XGBOOST_MM_PREFETCH_PRESENT="-DXGBOOST_MM_PREFETCH_PRESENT=1"
# Remove the probe artifacts (-f: ignore whichever of the two was not created).
rm -f ./test
rm -f ./test.cpp

# Substitute the detection result into the template; the placeholder name
# must match the @XGBOOST_MM_PREFETCH_PRESENT@ token in src/Makevars.win.in.
sed \
-e "s/@XGBOOST_MM_PREFETCH_PRESENT@/$XGBOOST_MM_PREFETCH_PRESENT/" \
< src/Makevars.win.in > src/Makevars.win
8 changes: 5 additions & 3 deletions R-package/man/xgb.params.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions R-package/src/Makevars.win → R-package/src/Makevars.win.in
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ PKG_CPPFLAGS = \
-I$(PKGROOT)/include \
-I$(PKGROOT)/dmlc-core/include \
-I$(PKGROOT) \
-DXGBOOST_BUILTIN_PREFETCH_PRESENT=1 \
@XGBOOST_MM_PREFETCH_PRESENT@ \
$(XGB_RFLAGS)

PKG_CXXFLAGS = \
Expand Down
4 changes: 3 additions & 1 deletion cmake/Version.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,7 @@ function(write_version)
message(STATUS "xgboost VERSION: ${xgboost_VERSION}")
configure_file(
${xgboost_SOURCE_DIR}/cmake/version_config.h.in
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h @ONLY)
${xgboost_SOURCE_DIR}/include/xgboost/version_config.h
@ONLY
NEWLINE_STYLE UNIX)
endfunction()
5 changes: 5 additions & 0 deletions doc/parameter.rst
Original file line number Diff line number Diff line change
Expand Up @@ -348,6 +348,11 @@ Parameters for Linear Booster (``booster=gblinear``)

- L1 regularization term on weights. Increasing this value will make model more conservative. Normalised to number of training examples.

* ``eta`` [default=0.5, alias: ``learning_rate``]

- Step size shrinkage used in update to prevent overfitting. After each boosting step, we can directly get the weights of new features, and ``eta`` shrinks the feature weights to make the boosting process more conservative.
- range: [0,1]

* ``updater`` [default= ``shotgun``]

- Choice of algorithm to fit linear model
Expand Down
2 changes: 1 addition & 1 deletion doc/tutorials/model.rst
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ It remains to ask: which tree do we want at each step? A natural thing is to ad

.. math::
\text{obj}^{(t)} & = \sum_{i=1}^n l(y_i, \hat{y}_i^{(t)}) + \sum_{i=1}^t\omega(f_i) \\
\text{obj}^{(t)} & = \sum_{i=1}^n l(y_i, \hat{y}_i^{(t)}) + \sum_{k=1}^t\omega(f_k) \\
& = \sum_{i=1}^n l(y_i, \hat{y}_i^{(t-1)} + f_t(x_i)) + \omega(f_t) + \mathrm{constant}
If we consider using mean squared error (MSE) as our loss function, the objective becomes
Expand Down
2 changes: 1 addition & 1 deletion ops/script/test_r_package.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def pkgroot(path: str) -> None:
if os.path.exists(osxmakef):
os.remove(osxmakef)
pkgroot("Makevars.in")
pkgroot("Makevars.win")
pkgroot("Makevars.win.in")
# misc
rwsp = Path("R-package") / "remove_warning_suppression_pragma.sh"
if system() != "Windows":
Expand Down

0 comments on commit d32fdb8

Please sign in to comment.