diff --git a/.Rbuildignore b/.Rbuildignore
index 23d05874..904872e7 100644
--- a/.Rbuildignore
+++ b/.Rbuildignore
@@ -1 +1,2 @@
cran-comments.md
+CHANGELOG.md
diff --git a/vignettes/.DS_Store b/._.DS_Store
old mode 100644
new mode 100755
similarity index 61%
rename from vignettes/.DS_Store
rename to ._.DS_Store
index 08d1559c..fcddd8e7
Binary files a/vignettes/.DS_Store and b/._.DS_Store differ
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..f9ee15f2
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,10 @@
+.git/
+.github/
+build/
+data/
+inst/
+man/
+R/
+tests/
+tools/
+vignettes/
diff --git a/.github/workflows/dev-enviro.yml b/.github/workflows/dev-enviro.yml
new file mode 100644
index 00000000..7dd99474
--- /dev/null
+++ b/.github/workflows/dev-enviro.yml
@@ -0,0 +1,32 @@
+name: Developer environment
+
+on:
+ workflow_dispatch:
+
+jobs:
+ test:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ max-parallel: 4
+ matrix:
+ python-version: ["3.11"]
+ os: [ubuntu-22.04]
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ - name: Install dependencies
+ run: |
+ python -m pip install -U pip wheel
+ python -m pip install -r requirements_dev.txt
+ - name: Check all Makefile commands
+ run: |
+ make clean
+ make lint
+ make format
+ make test
+ make coverage
+ make build
diff --git a/.github/workflows/python-test-windows.yml b/.github/workflows/python-test-windows.yml
new file mode 100644
index 00000000..1a097d71
--- /dev/null
+++ b/.github/workflows/python-test-windows.yml
@@ -0,0 +1,95 @@
+name: Python test windows
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - main
+
+env:
+ PYTHON_LIBRARY_DIR: $Env:CONDA\Lib\site-packages
+ PYTHON_EXECUTABLE: $Env:CONDA\python.exe
+ BUILD_TYPE: Release
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref_name }}
+ cancel-in-progress: true
+
+jobs:
+ test:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ max-parallel: 4
+ matrix:
+ python-version: ["3.11"]
+ os: ["windows-2022"]
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: set up python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+      - name: set dependency directory
+ run: echo "DEPENDENCY_DIR=$HOME\github-deps" >> $env:GITHUB_ENV
+ - name: restore github dependencies from cache
+ id: cache-github
+ uses: actions/cache@v4
+ with:
+ path: ${{ env.DEPENDENCY_DIR }}
+ key: ${{runner.os}}-github-deps
+ - name: create dependency dir
+ if: steps.cache-github.outputs.cache-hit != 'true'
+ run: mkdir ${{ env.DEPENDENCY_DIR }}
+ - name: download eigen
+ if: steps.cache-github.outputs.cache-hit != 'true'
+ run: |
+ cd ${{ env.DEPENDENCY_DIR }}
+ git clone --depth 1 https://github.com/libigl/eigen.git
+ - name: install nlopt
+ if: steps.cache-github.outputs.cache-hit != 'true'
+ run: |
+ cd ${{ env.DEPENDENCY_DIR }}
+ git clone --depth 1 https://github.com/stevengj/nlopt.git
+ cd nlopt
+ mkdir build
+ cd build
+ cmake ..
+ cmake --build . --config Release
+ - name: install gsl
+ if: steps.cache-github.outputs.cache-hit != 'true'
+ run: |
+ cd ${{ env.DEPENDENCY_DIR }}
+ git clone --depth 1 https://github.com/ampl/gsl.git
+ cd gsl
+ mkdir build
+ cd build
+ cmake .. -DNO_AMPL_BINDINGS=1
+ cmake --build . --config Release
+ - name: set eigen/nlopt/gsl directories
+ run: |
+ echo "EIGEN_DIR=${{ env.DEPENDENCY_DIR }}\eigen" >> $env:GITHUB_ENV
+ echo "NLOPT_DIR=${{ env.DEPENDENCY_DIR }}\nlopt\src\api;${{ env.DEPENDENCY_DIR }}\nlopt\build\Release;${{ env.DEPENDENCY_DIR }}\nlopt\build" >> $env:GITHUB_ENV
+ echo "GSL_DIR=${{ env.DEPENDENCY_DIR }}\gsl\build\Release;${{ env.DEPENDENCY_DIR }}\gsl\build" >> $env:GITHUB_ENV
+ - name: build bmds with cmake
+ shell: pwsh
+ run: |
+ mkdir build
+ cd build
+ cmake ..
+ cmake --build . --config Release
+ - name: Install dependencies
+ run: |
+ python -m pip install -U pip wheel
+ python -m pip install -r requirements_dev.txt
+ - name: Build pybmds
+ run: |
+ python setup.py develop
+ stubgen -p pybmds.bmdscore -o src
+ ruff format src\pybmds\bmdscore.pyi
+ python setup.py bdist_wheel
+ - uses: actions/upload-artifact@v4
+ with:
+ path: ./dist/*.whl
diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
new file mode 100644
index 00000000..003cd450
--- /dev/null
+++ b/.github/workflows/python-test.yml
@@ -0,0 +1,45 @@
+name: Python test linux
+
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - main
+
+jobs:
+ test:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ max-parallel: 4
+ matrix:
+ python-version: ["3.11"]
+ os: ["ubuntu-22.04"]
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ - name: Install dependencies
+ run: |
+ python -m pip install -U pip wheel
+ python -m pip install -r requirements_dev.txt
+ - name: Check linting
+ run: |
+ make lint
+ - name: Compile bmdscore and build pybmds
+ run: |
+ source ./tools/linux_ci.sh
+ make build
+ - name: Test with pytest
+ run: |
+ make test
+ - name: Build package
+ run: |
+ source ./tools/linux_ci.sh
+ make dist
+ - uses: actions/upload-artifact@v4
+ with:
+ path: ./dist/*.whl
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
new file mode 100644
index 00000000..8f3d8171
--- /dev/null
+++ b/.github/workflows/wheels.yml
@@ -0,0 +1,26 @@
+name: Build python packages
+
+on:
+ workflow_dispatch:
+ # push:
+ # branches:
+ # - main
+ # tags:
+
+jobs:
+
+ build_wheels:
+ name: Build wheels on ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-22.04, windows-2022, macos-12]
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build wheels
+ uses: pypa/cibuildwheel@v2.17.0
+ env:
+ CIBW_BUILD: "cp311-* cp312-* *-macosx_arm64 *-win_amd64 *-manylinux_x86_64"
+ - uses: actions/upload-artifact@v4
+ with:
+ path: ./wheelhouse/*.whl
diff --git a/.gitignore b/.gitignore
index 9d22eb46..617a0214 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,31 @@
+*.html
+*.docx
*.o
+# *.tar.gz
+*.a
+*.la
*.so
+*.Rproj.user
+*.tgz
+.Rproj.user
+.DS_Store
+*.o
+*.so
+.Rhistory
+build/
+create_dll_compile.bash
+
+# python
+*.egg-info/
+.cache/
+.ipynb_checkpoints/
+.mypy_cache/
+.pytest_cache/
+.vscode/
+dist/
+htmlcov/
+venv/
+*.log
+*.py[cod]
+.coverage
+.env
diff --git a/.gitlab/.gitlab-ci.yml b/.gitlab/.gitlab-ci.yml
new file mode 100644
index 00000000..8cf7a5e7
--- /dev/null
+++ b/.gitlab/.gitlab-ci.yml
@@ -0,0 +1,47 @@
+image: python:3.11-slim
+
+stages:
+ - compile
+
+package:
+ stage: compile
+ tags:
+ - devsecops-instance
+ script: |
+ # install
+ apt-get update -y
+ apt-get install -y automake build-essential libtool make cmake libgslcblas0 libgsl-dev
+
+ # build 3rd party
+ cp $CI_PROJECT_DIR/vendor/nlopt-2.7.1.tar.gz ~/nlopt-2.7.1.tar.gz
+ cp $CI_PROJECT_DIR/vendor/eigen-3.4.0.tar.gz ~/eigen-3.4.0.tar.gz
+ # untar
+ cd ~
+ tar -xvf ~/nlopt-2.7.1.tar.gz
+ tar -xvf ~/eigen-3.4.0.tar.gz
+ # build
+ cd ~/nlopt-2.7.1 && mkdir build && cd build && cmake .. && make install
+ cd ~/eigen-3.4.0 && mkdir build && cd build && cmake .. && make install
+ # cleanup
+ cd ~
+ rm -rf nlopt-2.7.1.tar.gz eigen-3.4.0.tar.gz nlopt-2.7.1 eigen-3.4.0
+
+ # build bmds
+ cd $CI_PROJECT_DIR
+ export "LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH"
+ mkdir -p ../BMD_DLL_COMPILE
+ cp ./.gitlab/create_dll_compile.bash.template ./create_dll_compile.bash
+ chmod 700 ./create_dll_compile.bash
+ ./create_dll_compile.bash
+ cd ../BMD_DLL_COMPILE
+ make -j$(nproc)
+ strip --strip-all ./.libs/libDRBMD.so.0.0.0
+
+ # copy
+ mkdir -p $CI_PROJECT_DIR/libs
+ cp ./.libs/* $CI_PROJECT_DIR/libs
+
+ artifacts:
+ paths:
+ - libs/*.so
+
diff --git a/.gitlab/create_dll_compile.bash.template b/.gitlab/create_dll_compile.bash.template
new file mode 100644
index 00000000..df33bbec
--- /dev/null
+++ b/.gitlab/create_dll_compile.bash.template
@@ -0,0 +1,41 @@
+#!/bin/bash
+#Script to copy over the correct R files and then
+#build the correct make scripts using configure.
+
+####################################################
+#ALWAYS NEEDS A PATH TO THE EIGEN DIRECTORY
+#BY DEFAULT IT ASSUMES IT IS IN /usr/local/include/eigen3
+SCRIPT_EIGEN_INCLUDE=/usr/local/include/eigen3
+
+##################################################
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m Assuming Eigen linear algebra library located at \e[0m"
+echo -e "\e[1;32m $SCRIPT_EIGEN_INCLUDE \e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+##################################################
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m Warning removing all information in the directory \e[0m"
+echo -e "\e[1;32m BMD_DLL_COMPILE \e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+
+rm -rf ../BMD_DLL_COMPILE
+##############################################################
+mkdir ../BMD_DLL_COMPILE
+mkdir ../BMD_DLL_COMPILE/include
+mkdir ../BMD_DLL_COMPILE/code_base
+cp -a ./src/include/* ../BMD_DLL_COMPILE/include
+cp -a ./src/code_base/* ../BMD_DLL_COMPILE/code_base
+cp ./configure.ac ../BMD_DLL_COMPILE
+cp ./Makefile.am ../BMD_DLL_COMPILE
+cp ./version.c ../BMD_DLL_COMPILE
+
+cd ../BMD_DLL_COMPILE
+libtoolize
+aclocal
+autoconf
+automake --add-missing
+./configure --disable-openmp EIGEN_INCLUDE=$SCRIPT_EIGEN_INCLUDE
diff --git a/src/CHANGELOG.md b/CHANGELOG.md
similarity index 57%
rename from src/CHANGELOG.md
rename to CHANGELOG.md
index 6ca6717d..1865edf5 100644
--- a/src/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,23 @@
# Changes
-## Version 22.05 (1.0.1)
+## Version 22.8.1.0.2
+
+### The following fixes are in version 1.0.2:
+ - Function 'single_continuous_fit' and 'ma_continuous_fit' changed error when defining default priors for
+ 'distribution=normal-ncv' when data are negative. Originally the variance was described as mean(Y)/var(Y);
+ however, for negative means, this causes NA error. It is now defined as abs(mean(Y))/var(Y).
+ - Log-normal distribution fits were incorrect when summarized data was used. The correct transformation of summarized data is now performed. The formula for standard deviation was typed in as sqrt(log((sd)^2/mu + 1)) it is now sqrt(log((sd/mu)^2+1)).
+  - Changed default priors for dichotomous fits to be consistent with Wheeler et al. (2020).
+
+### The following changes to fitting were made:
+ - Changed MLE Polynomial fit behavior. Now the terms up to the quadratic are constrained to be in the direction
+ of the response. After this, i.e., degree >= 3, the parameters are unconstrained.
+
+### Known Problems not yet fixed
+ - GoF for MA individual models not given.
+ - GoF for dichotomous models with (0/1) data fails.
+
+## Version 22.5.1.0.1
### The following fixes are in version 1.0.1:
diff --git a/CMAKE_README.md b/CMAKE_README.md
new file mode 100644
index 00000000..b5e0d568
--- /dev/null
+++ b/CMAKE_README.md
@@ -0,0 +1,75 @@
+# CMAKE build instructions
+
+## Developer setup
+### Requirements
+Python 3.11 or higher
+GSL 2.7 or greater
+EIGEN 3.4.0 or greater
+NLOPT 2.6.2 or greater
+CMAKE 3.10 or greater
+
+### Linux compiler
+gcc 9 or greater
+
+### Windows compiler
+Visual Studio 19 or greater
+
+### Environment Variables needed
+*Note: some may not be needed if installed in standard path*
+
+| Variable Name | Desc | Ex |
+|:----------|:-------|:----|
+|EIGEN_DIR|path to eigen directory|/home/username/libs/eigen-3.4.0|
+|GSL_DIR|path to GSL directory|/usr/local/apps/gsl-2.7|
+|NLOPT_DIR|path to nlopt lib directory|/home/username/libs/nlopt-2.6.2/lib64|
+|CMAKE_C_COMPILER|path to c compiler|/usr/local/apps/gcc/9.1.0/bin/gcc|
+|CMAKE_CXX_COMPILER|path to c++ compiler|/usr/local/apps/gcc/9.1.0/bin/g++|
+|PYTHON_EXECUTABLE|path to python executable |/home/username/mambaforge/bin/python|
+|PYTHON_LIBRARY_DIR|path to python site-packages directory| /home/username/venv/Lib/site-packages|
+
+## Build process
+
+### clone project
+
+```bash
+git clone git@github.com:USEPA/bmds.git
+cd bmds
+```
+
+### build c++ src
+
+```bash
+cd src
+mkdir build
+cd build
+cmake ..
+make # (for Linux/Mac)
+cmake --build . --config Release # (for Windows)
+```
+
+### build c++ test
+
+```bash
+cd src/tests
+mkdir build
+cd build
+cmake ..
+make # (for Linux/Mac)
+cmake --build . --config Release # (for Windows)
+```
+
+### build python shared object
+
+```bash
+mkdir build
+cd build
+cmake ..
+make # (for Linux/Mac)
+cmake --build . --config Release # (for Windows)
+```
+
+### build pybmds
+
+follow instructions in [README.md](README.md)
+
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 00000000..2c89babb
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,94 @@
+cmake_minimum_required(VERSION 3.1)
+
+project(pybind11-download NONE)
+
+include(ExternalProject)
+ExternalProject_Add(
+ pybind11
+ PREFIX .
+ GIT_REPOSITORY "https://github.com/pybind/pybind11.git"
+ GIT_TAG "80dc998efced8ceb2be59756668a7e90e8bef917" # v2.10.4
+ SOURCE_DIR "${CMAKE_BINARY_DIR}/third-party/pybind11"
+ # Override default steps with no action, we just want the clone step.
+ UPDATE_COMMAND ""
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+
+IF(DEFINED ENV{EIGEN_DIR})
+ set(EIGEN_DIR $ENV{EIGEN_DIR})
+ENDIF()
+IF(DEFINED ENV{GSL_DIR})
+ set(GSL_DIR $ENV{GSL_DIR})
+ IF(NOT WIN32)
+ set(GSL_ROOT_DIR $ENV{GSL_DIR})
+ ENDIF()
+ENDIF()
+IF(DEFINED ENV{NLOPT_DIR})
+ set(NLOPT_DIR $ENV{NLOPT_DIR})
+ENDIF()
+IF(DEFINED ENV{CMAKE_C_COMPILER})
+ set(CMAKE_C_COMPILER $ENV{CMAKE_C_COMPILER})
+ENDIF()
+IF(DEFINED ENV{CMAKE_CXX_COMPILER})
+ set(CMAKE_CXX_COMPILER $ENV{CMAKE_CXX_COMPILER})
+ENDIF()
+IF(DEFINED ENV{PYTHON_LIBRARY_DIR})
+ set(PYTHON_LIBRARY_DIR $ENV{PYTHON_LIBRARY_DIR})
+ENDIF()
+
+IF(WIN32)
+ add_compile_definitions(RBMDS_EXPORTS)
+ENDIF()
+
+#if(NOT CMAKE_BUILD_TYPE)
+# set(CMAKE_BUILD_TYPE Release)
+#endif()
+
+#set(CMAKE_CXX_FLAGS "-O3")
+#set(CMAKE_CXX_FLAGS_RELEASE "-O3")
+
+IF(WIN32)
+ find_library(GSL_LIB gsl REQUIRED HINTS ${GSL_DIR})
+ELSE()
+ find_package(GSL REQUIRED)
+ENDIF()
+find_library(NLOPT_LIB NAMES NLopt nlopt libnlopt REQUIRED HINTS ${NLOPT_DIR})
+
+
+
+project(bmdscore)
+
+include_directories("${CMAKE_SOURCE_DIR}/src/code_base" "${CMAKE_SOURCE_DIR}/src/include" ${EIGEN_DIR} ${NLOPT_DIR}/../include ${NLOPT_DIR} ${GSL_DIR})
+link_directories(${GSL_LIB_DIR} ${NLOPT_LIB_DIR})
+
+file (GLOB SOURCE_FILES "${CMAKE_SOURCE_DIR}/src/code_base/*.cpp")
+file (GLOB HEADER_FILES "src/include/*.h" "src/code_base/*.h" ${EIGEN_DIR})
+file (GLOB PYTHON_FILES "src/pybmdscpp/*.cpp" "src/pybmdscpp/*.h")
+
+# Set up such that XCode organizes the files
+source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${SOURCE_FILES} ${HEADER_FILES} ${PYTHON_FILES} )
+
+include(pybind11.cmake)
+pybind11_add_module(bmdscore
+ ${SOURCE_FILES}
+ ${HEADER_FILES}
+ ${PYTHON_FILES}
+)
+
+IF(WIN32)
+ target_link_libraries(bmdscore PUBLIC ${GSL_LIB} ${NLOPT_LIB})
+ELSE()
+ target_link_libraries(bmdscore PUBLIC GSL::gsl GSL::gslcblas ${NLOPT_LIB})
+ENDIF(WIN32)
+
+install(TARGETS bmdscore
+ COMPONENT python
+ LIBRARY DESTINATION "${PYTHON_LIBRARY_DIR}"
+ )
diff --git a/DESCRIPTION b/DESCRIPTION
index 4a73967c..a9890199 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,8 +1,8 @@
Package: ToxicR
Type: Package
Title: Analyzing Toxicology Dose-Response Data
-Version: 22.5.1
-Date: 2022-06-05
+Version: 22.8.1.0.2
+Date: 2022-08-31
Authors@R:
c(
person(given = "Matt",
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..996390c2
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) 2022 U.S. Federal Government (in countries where recognized)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..8704866e
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+recursive-include * *.pyi
+recursive-exclude * __pycache__
+recursive-exclude * *.py[co]
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..c199f189
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,42 @@
+.PHONY: clean lint format test coverage build develop
+.DEFAULT_GOAL := help
+
+define PRINT_HELP_PYSCRIPT
+import re, sys
+
+for line in sys.stdin:
+ match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
+ if match:
+ target, help = match.groups()
+ print("%-20s %s" % (target, help))
+endef
+export PRINT_HELP_PYSCRIPT
+
+help:
+ @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
+
+clean: ## remove build artifacts
+ @rm -rf build/ dist/
+
+lint: ## Check for python formatting issues
+ @ruff format . --check && ruff check .
+
+format: ## Modify python code where possible
+ @ruff format . && ruff check . --fix --show-fixes
+
+test: ## Run unit tests
+ @py.test
+
+coverage: ## Generate coverage report
+ @coverage run -m pytest
+ @coverage html
+
+build: ## Rebuild in development environment
+ @python setup.py develop
+ @stubgen -p pybmds.bmdscore -o src/
+ @ruff format src/pybmds/bmdscore.pyi
+ @ls -lh src/pybmds/
+
+dist: ## Build wheel package for distribution
+ @python setup.py bdist_wheel
+ @ls -lh dist
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 00000000..b64c5adc
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,15 @@
+AUTOMAKE_OPTIONS = foreign
+AM_INIT_AUTOMAKE =
+SUBDIRS = code_base
+ACLOCAL_AMFLAGS= -I m4
+CPPFLAGS = -I$(top_srcdir)/code_base -I$(top_srcdir)/include -I$EIGEN_INCLUDE
+CPPFLAGS += -O3 -funroll-loops -std=c++11
+CPPFLAGS += @OPENMP_CFLAGS@ -Wno-ignored-attributes -DEIGEN_NO_DEBUG -march=native
+
+PKG_LIBS = -lgsl -lnlopt @OPENMP_CFLAGS@
+
+lib_LTLIBRARIES = libDRBMD.la
+libDRBMD_la_SOURCES = version.c
+libDRBMD_la_LIBADD = code_base/libBMDS.la
+libDRBMD_la_LIBS = -lgsl -lnlopt @OPENMP_CFLAGS@
+
diff --git a/R/continuous_wrappers.R b/R/continuous_wrappers.R
index 649fe13b..03895077 100644
--- a/R/continuous_wrappers.R
+++ b/R/continuous_wrappers.R
@@ -134,13 +134,13 @@ single_continuous_fit <- function(D,Y,model_type="hill", fit_type = "laplace",
if (distribution == "normal"){
PR$priors[nrow(PR$priors),2] = log(mean(Y[,3])^2)
}else{
- PR$priors[nrow(PR$priors),2] = log(mean(Y[1,])/mean(Y[,3])^2)
+ PR$priors[nrow(PR$priors),2] = log(abs(mean(Y[1,]))/mean(Y[,3])^2)
}
}else{
if (distribution == "normal"){
PR$priors[nrow(PR$priors),2] = log(var(Y[,1]))
}else{
- PR$priors[nrow(PR$priors),2] = log(mean(Y[,1])/var(Y[,1]))
+ PR$priors[nrow(PR$priors),2] = log(abs(mean(Y[,1]))/var(Y[,1]))
}
}
}
diff --git a/R/model_averaging_fits.R b/R/model_averaging_fits.R
index 1ba1ee75..461b3055 100644
--- a/R/model_averaging_fits.R
+++ b/R/model_averaging_fits.R
@@ -100,14 +100,14 @@ ma_continuous_fit <- function(D,Y,model_list=NA, fit_type = "laplace",
if (distribution_list[ii] == "normal"){
PR$priors[nrow(PR$priors),2] = log(mean(Y[,3])^2)
}else{
- PR$priors[nrow(PR$priors),2] = log(mean(Y[1,])/mean(Y[,3])^2)
+ PR$priors[nrow(PR$priors),2] = log(abs(mean(Y[1,]))/mean(Y[,3])^2)
}
}else{
if (distribution_list[ii] == "normal"){
PR$priors[nrow(PR$priors),2] = log(var(Y))
}else{
- PR$priors[nrow(PR$priors),2] = log(mean(Y)/var(Y))
+ PR$priors[nrow(PR$priors),2] = log(abs(mean(Y))/var(Y))
}
}
}
diff --git a/R/opening_messages.R b/R/opening_messages.R
index 2d38eaa3..3d0e01cc 100644
--- a/R/opening_messages.R
+++ b/R/opening_messages.R
@@ -25,6 +25,7 @@
| |/ _ \\ \\/ / |/ __| _ /
| | (_) > <| | (__| | \\ \\
|_|\\___/_/\\_\\_|\\___|_| \\_\\
+ 22.8.1.0.2
___
| |
/ \\ ____()()
diff --git a/R/predict.R b/R/predict.R
index 823c6580..14ee5420 100644
--- a/R/predict.R
+++ b/R/predict.R
@@ -245,4 +245,4 @@
returnV <- list(X = test_doses, Y = f)
}
return(returnV)
-}
\ No newline at end of file
+}
diff --git a/R/prior_classes.R b/R/prior_classes.R
index 1f9533c8..d81e7227 100644
--- a/R/prior_classes.R
+++ b/R/prior_classes.R
@@ -292,8 +292,8 @@ create_prior_list <- function(x1,x2,...){
prior <- create_dichotomous_prior(prior,"gamma")
}
if (dmodel == 3){ #LOGISTIC
- prior <- create_prior_list(normprior( 0, 2, -20, 20),
- lnormprior(0.1, 1, 0, 40))
+ prior <- create_prior_list(normprior( 0, 1, -20, 20),
+ lnormprior(0, 2, 0, 40))
prior <- create_dichotomous_prior(prior,"logistic")
}
if (dmodel == 4){ #LOG-LOGISTIC
@@ -321,13 +321,13 @@ create_prior_list <- function(x1,x2,...){
prior <- create_dichotomous_prior(prior,"multistage")
}
if (dmodel == 7){ #PROBIT
- prior <- create_prior_list(normprior( 0, 2, -20, 20),
- lnormprior(0.1, 1, 0, 40))
+ prior <- create_prior_list(normprior( 0, 1, -20, 20),
+ lnormprior(0, 2, 0, 40))
prior <- create_dichotomous_prior(prior,"probit")
}
if (dmodel == 8){ #QLINEAR
prior <- create_prior_list(normprior( 0, 2, -20, 20),
- lnormprior(0.15, 1, 0, 18))
+ lnormprior(0, 1, 0, 18))
prior <- create_dichotomous_prior(prior,"qlinear")
}
if (dmodel == 9){ #WEIBULL
@@ -352,13 +352,21 @@ create_prior_list <- function(x1,x2,...){
if (dmodel == 5){
if (dvariance == 1){
- prior <- create_prior_list(normprior(0,5,-1000,1000))
+ prior <- create_prior_list(normprior(0,5,-100000,100000))
for (ii in 1:degree){
if(is_increasing){
- prior <- .combine_prior_lists(prior, normprior(0,5,0,18))
+ if (ii <= 2){
+ prior <- .combine_prior_lists(prior, normprior(0,5,0,1e6))
+ }else{
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,1e6))
+ }
} else{
- prior <- .combine_prior_lists(prior, normprior(0,5,-18,0))
+ if (ii <= 2){
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,0))
+ }else{
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,1e6))
+ }
}
}
@@ -371,11 +379,23 @@ create_prior_list <- function(x1,x2,...){
prior <- create_prior_list(normprior(0,5,0,1000))
for (ii in 1:degree){
- prior <- .combine_prior_lists(prior,normprior(0,5,-10000,10000))
+ if(is_increasing){
+ if (ii <= 2){
+ prior <- .combine_prior_lists(prior, normprior(0,5,0,1e6))
+ }else{
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,1e6))
+ }
+ } else{
+ if (ii <= 2){
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,0))
+ }else{
+ prior <- .combine_prior_lists(prior, normprior(0,5,-1e6,1e6))
+ }
+ }
}
prior <- .combine_prior_lists(prior,
- create_prior_list(lnormprior(0,1,0,100),
- normprior (0,1,-18,18)))
+ create_prior_list(normprior(0,1,0,18),
+ normprior (0,1,-18,18)))
prior[[1]][,1] = 0
return(prior)
@@ -439,7 +459,7 @@ create_prior_list <- function(x1,x2,...){
lnormprior(1,0.2,1,18), #d
normprior(0,1,-18,18))
} else if (dvariance == 3){
- prior <- create_prior_list(normprior(0,0.1, -100,100), # a
+ prior <- create_prior_list(normprior(0,0.1, -1000,1000), # a
lnormprior(0,1, 0,100), # b
normprior(0,1, -20,20), # log(c)
lnormprior(1,0.2,1,18), #d
diff --git a/R/summary_dichotomous.R b/R/summary_dichotomous.R
index 03db9f22..90f634b6 100644
--- a/R/summary_dichotomous.R
+++ b/R/summary_dichotomous.R
@@ -62,4 +62,4 @@
s_fit$GOF <- round(s_fit$GOF,3)
rownames(s_fit$GOF) <- c("Test: X^2 GOF")
print(s_fit$GOF)
-}
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index dd3f3550..8ed3b898 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,48 @@
-![alt text](https://github.com/wheelemw/ToxicRDocs/blob/main/Toxic-R_Web_Graphic_V13.jpg)
+# BMDS
-# Welcome to the Official Git Repository of ToxicR!
+TODO - describe!
-## What is ToxicR?
+Disclaimer: The United States Government project code is provided on an "as is" basis and the user assumes responsibility for its use. The United States Government has relinquished control of the information and no longer has responsibility to protect the integrity, confidentiality, or availability of the information. Any reference to specific commercial products, processes, or services by service mark, trademark, manufacturer, or otherwise, does not constitute or imply their endorsement, recommendation or favoring by the United States Government. The NIH or EPA seal and logo shall not be used in any manner to imply endorsement of any commercial product or activity by NIH or EPA or the United States Government.
-ToxicR is an R package that utilizes the core functionality of the US EPA's Benchmark Dose Software and the NTP's BMDexpress, which was developed in unison with researchers from the National Institute of Environmental Health. Unlike those packages, ToxicR is fully interactive and can be used to create custom analysis pipelines within the R programming environment.
+## ToxicR
-## Current Release
+TODO
+## pybmds
-Version 1.0.1 is the most recent release. For precompiled binaries of this release, go to [Release V.1.0.0](https://github.com/NIEHS/ToxicR/releases/tag/v1.0.0).
+### Developer setup
-### Installation
+Make sure you have python 3.11 or higher available on your path.
-For Windows, the .zip file is compiled in R 4.2.
-For macOS, the .tgz file is compiled in R 4.2.
-For Linux, the package must be compiled.
-For more information, please visit the instructions on the [Wiki](https://github.com/NIEHS/ToxicR/wiki).
+```bash
+# clone project
+git clone git@github.com:USEPA/bmds.git
+cd bmds
-## Citing ToxicR
+# create virtual environment and activate
+python -m venv venv --prompt pybmds
+source venv/bin/activate # or venv\Scripts\activate on windows.
-All dose-response methodologies used in ToxicR were developed in the following manuscripts:
+# install packages
+python -m pip install -U pip
+python -m pip install -r requirements_dev.txt
-Wheeler, M.W., Blessinger, T., Shao, K., Allen, B.C., Olszyk, L., Davis, J.A. and Gift, J.S. (2020), Quantitative Risk Assessment: Developing a Bayesian Approach to Dichotomous Dose–Response Uncertainty. Risk Analysis, 40: 1706-1722. https://doi.org/10.1111/risa.13537
+# build
+make build # recompile source for development of pybmds package
-Wheeler, M. W., Cortiñas Abrahantes, J., Aerts, M., Gift, J. S., & Allen Davis, J. (2022). Continuous model averaging for benchmark dose analysis: Averaging over distributional forms. Environmetrics, e2728. https://doi.org/10.1002/env.2728
+# test local install
+pybmds hello
-
-
+# code formatting
+make lint # identify formatting errors
+make format # fix formatting errors when possible
+
+# testing
+make test # run tests
+make coverage # show test coverage
+
+# distribution
+make dist # build a portable python wheel for distribution
+```
+
+Github actions are setup to execute whenever code is pushed to check code formatting and successful tests. In addition, when code is pushed to the `main` branch, a wheel artifact is created and stored on github.
diff --git a/configure.ac b/configure.ac
index 84862930..20a7826a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,115 +1,32 @@
-# Configure Script for ToxicR
-# Modified 4/27/2022
-# Configure Script for ToxicR
-# Modified 4/27/2022
+AC_INIT([bmds_shared_object], [1.0], [matt.wheeler@nih.gov])
+AM_INIT_AUTOMAKE
+AC_CONFIG_MACRO_DIRS([m4])
+AC_PROG_CXX
+AC_PROG_CC
+AC_CHECK_LIB([m],[cos])
+AC_CHECK_LIB([gslcblas],[cblas_dgemm])
+AC_CHECK_LIB(gsl, [gsl_ran_gaussian], [], [
+ echo "Error! You need to have libgsl installed."
+ exit -1
+ ])
+AC_CHECK_LIB(nlopt, [nlopt_get_algorithm], [], [
+ echo "Error! You need to have libnlopt installed."
+ exit -1
+ ])
-AC_INIT([ToxicR], 1.0.1) dnl Package version
-CXX=`"${R_HOME}/bin/R" CMD config CXX`
-CXXFLAGS=`"${R_HOME}/bin/R" CMD config CXXFLAGS`
-CPPFLAGS=`"${R_HOME}/bin/R" CMD config CPPFLAGS`
-AC_LANG(C++)
-AC_PROG_CPP
+AC_ARG_VAR(EIGEN_INCLUDE,Location of Eigen include directory e.g, /usr/include/Eigen3)
-#determine the OS
-AC_MSG_CHECKING([Is OPENMP avaialble? ])
-OS_FLAG="$(uname)"
-if test x"${OS_FLAG}" == x"Darwin";then
- AC_MSG_RESULT([no])
- OPENMP=""
-else
- AC_MSG_RESULT([yes])
- OPENMP="\$(SHLIB_OPENMP_CXXFLAGS) "
-fi
+#Check for OpenMP
+AC_OPENMP
-AC_PATH_PROG([GSL_CONFIG], [gsl-config])
-## If gsl-config was found, let's use it
-if test "${GSL_CONFIG}" != ""; then
- AC_MSG_RESULT([yes])
- # Use gsl-config for header and linker arguments
- gsl_include=$(gsl-config --cflags)
- gsl_libs=$(gsl-config --libs)
-else
- AC_MSG_ERROR([gsl-config not found, is GSL installed?
- To install GSL in Ubuntu Type:
- sudo apt install gsl
- To install GSL in Fedora Type:
- sudo yum -y install gsl
- To iinstall GSL on macOS using homebrew type:
- brew install gsl
- ])
-fi
+AC_SUBST(OPENMP_CFLAGS)
-## Can we use pkg-config?
-AC_PATH_PROG(have_pkg_config, pkg-config, no)
-AC_MSG_CHECKING([Is pkg-config avaialble? ])
-if test x"${have_pkg_config}" != x"no"; then
- AC_MSG_RESULT([yes])
- AC_MSG_CHECKING([if pkg-config knows NLopt])
- if pkg-config --exists nlopt; then
- AC_MSG_RESULT([yes])
- ## Since nlopt has been found, test for minimal version requirement
- AC_MSG_CHECKING([for pkg-config checking NLopt version])
- if pkg-config --atleast-version=2.6.0 nlopt; then
- AC_MSG_RESULT([>= 2.6.0])
- nlopt_include=$(pkg-config --cflags nlopt)
- nlopt_libs=$(pkg-config --libs nlopt)
- AC_SUBST([NLOPT_INCLUDE], "${nlopt_include}")
- AC_SUBST([NLOPT_LIBS], "${nlopt_libs}")
- else
- AC_MSG_RESULT([no])
- #if it is macOS just try link and use nlopt generically
- need_to_build="yes"
- fi
- else
- AC_MSG_RESULT([no])
- #if it is macOS just try link and use nlopt generically
- if test x"${OS_FLAG}" == x"Darwin";then
- AC_MSG_RESULT([>= 2.6.0])
- nlopt_include=""
- nlopt_libs="-lnlopt"
- AC_SUBST([NLOPT_INCLUDE], "${nlopt_include}")
- AC_SUBST([NLOPT_LIBS], "${nlopt_libs}")
- else
- need_to_build="yes"
- fi
-
- fi
-
-else
- AC_MSG_RESULT([no])
- need_to_build="yes"
-fi
+LT_PREREQ([2.2])
+LT_INIT()
-## So do we need to build
-if test x"${need_to_build}" != x"no"; then
- AC_PATH_PROG(have_cmake, cmake, no)
-
- if test x"${have_cmake}" == x"no"; then
- . src/scripts/cmake_config.sh
- if test -z "${CMAKE_BIN}"; then
- ## also error to end configure here
- AC_MSG_ERROR([Could not find 'cmake'.])
- fi
- fi
- ## 'uname -m' on M1 give x86_64 which is ... not helping
- machine=`"${R_HOME}/bin/Rscript" -e 'cat(Sys.info()[["machine"]])'`
- AC_MSG_RESULT([using NLopt via local cmake build on ${machine} ])
- tools/cmake_call.sh
- ## cmake_call.sh installs into nlopt/lib, headers are copied
- nlopt_include="-I./nlopt/include"
- nlopt_libs="-L./nlopt/lib -lnlopt"
-fi
+AC_CONFIG_FILES([code_base/Makefile
+ Makefile
+ ])
-SUBDIR_SOURCES="$(cd src/ && ls {code_base,polyK}/*.cpp | tr '\n' ' ')"
-SRC_SOURCES="$( cd src/ && ls *.cpp | tr '\n' ' ')"
-
-AC_SUBST(OPENMP)
-AC_SUBST(SRC_SOURCES)
-AC_SUBST(SUBDIR_SOURCES)
-AC_SUBST([NLOPT_CPPFLAGS],["${nlopt_include}"])
-AC_SUBST([NLOPT_LIBS],["${nlopt_libs}"])
-AC_SUBST([GSL_CPPFLAGS],["${gsl_include}"])
-AC_SUBST([GSL_LIBS],["${gsl_libs}"])
-AC_CONFIG_FILES([src/Makevars])
AC_OUTPUT
diff --git a/create_dll_compile.bash.template b/create_dll_compile.bash.template
new file mode 100755
index 00000000..1d8f8cd7
--- /dev/null
+++ b/create_dll_compile.bash.template
@@ -0,0 +1,42 @@
+#!/bin/bash
+#Script to copy over the correct R files and then
+#build the correct make scripts using configure.
+
+####################################################
+#ALWAYS NEEDS A PATH TO THE EIGEN DIRECTORY
+#BY DEFAULT IT ASSUMES IT IS IN /usr/local/include/eigen3
+SCRIPT_EIGEN_INCLUDE=/usr/local/include/eigen3
+
+
+##################################################
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m Assuming Eigen linear algebra library located at \e[0m"
+echo -e "\e[1;32m $SCRIPT_EIGEN_INCLUDE \e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+##################################################
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m Warning removing all information in the directory \e[0m"
+echo -e "\e[1;32m BMD_DLL_COMPILE \e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+echo -e "\e[1;31m ------------------------------------------------\e[0m"
+
+rm -rf ../BMD_DLL_COMPILE
+##############################################################
+mkdir ../BMD_DLL_COMPILE
+mkdir ../BMD_DLL_COMPILE/include
+mkdir ../BMD_DLL_COMPILE/code_base
+cp -a ./src/include/* ../BMD_DLL_COMPILE/include
+cp -a ./src/code_base/* ../BMD_DLL_COMPILE/code_base
+cp ./configure.ac ../BMD_DLL_COMPILE
+cp ./Makefile.am ../BMD_DLL_COMPILE
+cp ./version.c ../BMD_DLL_COMPILE
+
+cd ../BMD_DLL_COMPILE
+libtoolize
+aclocal
+autoconf
+automake --add-missing
+./configure --disable-openmp EIGEN_INCLUDE=$SCRIPT_EIGEN_INCLUDE
diff --git a/dll/README.md b/dll/README.md
new file mode 100644
index 00000000..b72c0097
--- /dev/null
+++ b/dll/README.md
@@ -0,0 +1,105 @@
+# The Dll
+
+A shared (dynamically linked) library can also be built as a lower-level interface to core BMDS functionality.
+
+## Mac (homebrew)
+
+Use gcc instead of xcode:
+
+```bash
+brew install gcc automake cmake make libtool
+ln -s /usr/local/bin/glibtoolize /usr/local/bin/libtoolize
+export "CC=/usr/local/bin/gcc-11"
+export "CXX=/usr/local/bin/g++-11"
+export "LDFLAGS=-L/usr/local/lib/"
+export "CPPFLAGS=-I/usr/local/include"
+```
+
+Next, install dependencies:
+
+```bash
+export "CC=/usr/local/bin/gcc-11"
+export "CXX=/usr/local/bin/g++-11"
+export "LDFLAGS=-L/usr/local/lib/"
+export "CPPFLAGS=-I/usr/local/include"
+wget https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.tar.gz
+wget https://github.com/stevengj/nlopt/archive/v2.6.2.tar.gz
+# build eigen
+tar -xvf ./eigen-3.3.7.tar.gz
+cd eigen-3.3.7 && mkdir build && cd build && cmake .. && make install
+cd ../.. && rm -rf eigen-3.3.7/ && rm eigen-3.3.7.tar.gz
+# build nlopt
+tar -xvf ./v2.6.2.tar.gz
+cd ./nlopt-2.6.2/ && mkdir build && cd build && cmake .. && make install
+cd ../.. && rm -rf nlopt-2.6.2/ && rm v2.6.2.tar.gz
+```
+
+Finally, build rbmds:
+
+```bash
+export "CC=/usr/local/bin/gcc-11"
+export "CXX=/usr/local/bin/g++-11"
+export "LDFLAGS=-L/usr/local/lib/"
+export "CPPFLAGS=-I/usr/local/include"
+# build bmds
+rm -rf ../BMD_DLL_COMPILE
+mkdir ../BMD_DLL_COMPILE
+
+# disable openmp support on mac by default
+sed -i 's/.\/configure EIGEN_INCLUDE/.\/configure \-\-disable\-openmp EIGEN_INCLUDE/g' ./create_dll_compile.bash
+./create_dll_compile.bash
+cd ../BMD_DLL_COMPILE
+make -j6 # -j flag is num processors; hardware-specific
+mkdir -p ../RBMDS/build/mac
+cp .libs/* ../RBMDS/build/mac
+```
+
+### Linker troubleshooting
+
+This doesn't seem required in all installs, but just in case:
+
+```bash
+# see if anything is missing here....
+otool -L bmds/bin/BMDS330/libDRBMD.dylib
+bmds/bin/BMDS330/libDRBMD.dylib:
+ /usr/local/lib/libDRBMD.0.dylib (compatibility version 1.0.0, current version 1.0.0)
+ /usr/local/lib/qt/libnlopt.0.dylib (compatibility version 0.0.0, current version 0.10.0)
+ /usr/local/opt/gsl/lib/libgsl.25.dylib (compatibility version 26.0.0, current version 26.0.0)
+ /usr/local/opt/gsl/lib/libgslcblas.0.dylib (compatibility version 1.0.0, current version 1.0.0)
+ /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1281.100.1)
+ /usr/local/lib/gcc/10/libgcc_s.1.dylib (compatibility version 1.0.0, current version 1.0.0)
+
+# sometimes this works, but you can hard-code the path
+install_name_tool -change \
+ @rpath/libnlopt.0.dylib \
+ /usr/local/lib/libnlopt.0.dylib \
+ bmds/bin/BMDS330/libDRBMD.dylib
+```
+
+## Python debian docker container
+
+To build in a standard debian python container:
+
+```bash
+# disable openmp support by default (same configure tweak as used for mac above)
+sed -i 's/.\/configure EIGEN_INCLUDE/.\/configure \-\-disable\-openmp EIGEN_INCLUDE/g' ./create_dll_compile.bash
+
+# build in a fresh container
+docker build -t rbmds-python:latest -f dll/python/Dockerfile --platform="linux/x86_64" .
+
+# demo a successful execution inside container
+docker run --rm -it rbmds-python:latest python /app/dll/python/dll_test.py
+
+# copy shared libraries for reuse
+rm -rf build/linux/python
+mkdir -p build/linux/python
+chmod 777 build/linux/python
+docker run --rm -v $(pwd)/build/linux/python:/tmp rbmds-python:latest sh -c "cp /usr/local/lib/*.so /tmp"
+
+# delete container now that we've captured the libraries
+docker rmi rbmds-python:latest
+```
+
+The final step copies the required shared libraries into a fresh python container to demo running
+as a non-root user w/o the build system. There are a few system-level packages that were required
+(see docker file for more details).
diff --git a/dll/python/Dockerfile b/dll/python/Dockerfile
new file mode 100644
index 00000000..efa61b0b
--- /dev/null
+++ b/dll/python/Dockerfile
@@ -0,0 +1,56 @@
+FROM python:3.11-slim AS builder
+
+ENV PYTHONUNBUFFERED 1
+
+RUN apt-get update -y && \
+ apt-get install -y automake build-essential libtool make cmake libgslcblas0 libgsl-dev
+
+COPY ./vendor /vendor
+RUN cd /vendor && \
+ \
+ tar -xvf nlopt-2.7.1.tar.gz && \
+ tar -xvf eigen-3.4.0.tar.gz && \
+ \
+ cd /vendor/nlopt-2.7.1 && mkdir build && cd build && cmake .. && make install && \
+ cd /vendor/eigen-3.4.0 && mkdir build && cd build && cmake .. && make install && \
+ \
+ rm -rf /vendor
+
+ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+COPY ./dll /app/dll
+
+COPY ./create_dll_compile.bash.template /app/create_dll_compile.bash
+COPY . /app
+RUN cd /app && \
+ mkdir -p ../BMD_DLL_COMPILE && \
+ ./create_dll_compile.bash && \
+ cd /BMD_DLL_COMPILE && \
+ make -j$(nproc) && \
+ strip --strip-all ./.libs/libDRBMD.so.0.0.0 && \
+ make install && \
+ python /app/dll/python/dll_test.py
+
+# -----------------------------------------------------------------------------
+# copy outputs into clean docker build
+FROM python:3.11-slim
+
+COPY --from=builder /usr/local/lib/libDRBMD.* /usr/local/lib/
+COPY --from=builder /usr/local/lib/libnlopt* /usr/local/lib/
+COPY --from=builder /app/dll/python/dll_test.py /app/dll/python/dll_test.py
+
+RUN apt-get update -y && \
+ apt-get install -y libgslcblas0 libgsl-dev
+
+RUN groupadd -g 555 -r app && \
+ useradd -u 555 -r -g app app && \
+ chown -R app:app /app
+
+# demo using non-root user
+
+USER app
+
+ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
+
+RUN ldd /usr/local/lib/libDRBMD.so && \
+ python /app/dll/python/dll_test.py
diff --git a/dll/python/dll_test.py b/dll/python/dll_test.py
new file mode 100644
index 00000000..90136774
--- /dev/null
+++ b/dll/python/dll_test.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+
+import ctypes
+import json
+from enum import IntEnum
+from pathlib import Path
+from typing import Any, ClassVar, NamedTuple
+
+
+def _list_to_c(list: list[Any], ctype):
+ return (ctype * len(list))(*list)
+
+
+class DichModel(IntEnum):
+ d_hill = 1
+ d_gamma = 2
+ d_logistic = 3
+ d_loglogistic = 4
+ d_logprobit = 5
+ d_multistage = 6
+ d_probit = 7
+ d_qlinear = 8
+ d_weibull = 9
+
+
+class DichotomousAnalysis(NamedTuple):
+ model: int
+ n: int
+ Y: list[float]
+ doses: list[float]
+ n_group: list[float]
+ prior: list[float]
+ BMD_type: int
+ BMR: float
+ alpha: float
+ degree: int
+ samples: int
+ burnin: int
+ parms: int
+ prior_cols: int
+
+ class Struct(ctypes.Structure):
+ _fields_: ClassVar[list[tuple[str, Any]]] = [
+ ("model", ctypes.c_int), # Model Type as listed in DichModel
+ ("n", ctypes.c_int), # total number of observations obs/n
+ ("Y", ctypes.POINTER(ctypes.c_double)), # observed +
+ ("doses", ctypes.POINTER(ctypes.c_double)),
+ ("n_group", ctypes.POINTER(ctypes.c_double)), # size of the group
+ ("prior", ctypes.POINTER(ctypes.c_double)), # a column order matrix parms X prior_cols
+ ("BMD_type", ctypes.c_int), # 1 = extra ; added otherwise
+ ("BMR", ctypes.c_double),
+ ("alpha", ctypes.c_double), # alpha of the analysis
+ ("degree", ctypes.c_int), # degree of polynomial used only multistage
+ ("samples", ctypes.c_int), # number of MCMC samples
+ ("burnin", ctypes.c_int), # size of burnin
+ ("parms", ctypes.c_int), # number of parameters in the model
+ ("prior_cols", ctypes.c_int), # columns in the prior
+ ]
+
+ def dict(self) -> dict:
+ return dict(
+ model=self.model,
+ n=self.n,
+ Y=self.Y[: self.n],
+ doses=self.doses[: self.n],
+ n_group=self.n_group[: self.n],
+ prior=self.prior[: self.parms * self.prior_cols],
+ BMD_type=self.BMD_type,
+ BMR=self.BMR,
+ alpha=self.alpha,
+ degree=self.degree,
+ samples=self.samples,
+ burnin=self.burnin,
+ parms=self.parms,
+ prior_cols=self.prior_cols,
+ )
+
+ def to_c(self):
+ return self.Struct(
+ model=ctypes.c_int(self.model),
+ n=ctypes.c_int(self.n),
+ Y=_list_to_c(self.Y, ctypes.c_double),
+ doses=_list_to_c(self.doses, ctypes.c_double),
+ n_group=_list_to_c(self.n_group, ctypes.c_double),
+ prior=_list_to_c(self.prior, ctypes.c_double),
+ BMD_type=ctypes.c_int(self.BMD_type),
+ BMR=ctypes.c_double(self.BMR),
+ alpha=ctypes.c_double(self.alpha),
+ degree=ctypes.c_int(self.degree),
+ samples=ctypes.c_int(self.samples),
+ burnin=ctypes.c_int(self.burnin),
+ parms=ctypes.c_int(self.parms),
+ prior_cols=ctypes.c_int(self.prior_cols),
+ )
+
+
+class DichotomousModelResult(NamedTuple):
+ """
+ Purpose: Data structure that is populated with all of the necessary
+ information for a single model fit.
+ """
+
+ model: int
+ nparms: int
+ dist_numE: int
+
+ class Struct(ctypes.Structure):
+ _fields_: ClassVar[list[tuple[str, Any]]] = [
+ ("model", ctypes.c_int), # dichotomous model specification
+ ("nparms", ctypes.c_int), # number of parameters in the model
+ ("parms", ctypes.POINTER(ctypes.c_double)), # parameter estimate
+ ("cov", ctypes.POINTER(ctypes.c_double)), # covariance estimate
+ ("max", ctypes.c_double), # value of the likelihood/posterior at the maximum
+ ("dist_numE", ctypes.c_int), # number of entries in rows for the bmd_dist
+ ("model_df", ctypes.c_double), # Used model degrees of freedom
+ ("total_df", ctypes.c_double), # Total degrees of freedom
+ ("bmd_dist", ctypes.POINTER(ctypes.c_double)), # bmd distribution (dist_numE x 2)
+ ("bmd", ctypes.c_double), # the central estimate of the BMD
+ ]
+
+ def dict(self) -> dict:
+ return dict(
+ model=self.model,
+ nparms=self.nparms,
+ parms=self.parms[: self.nparms],
+ cov=self.cov[: self.nparms**2],
+ max=self.max,
+ dist_numE=self.dist_numE,
+ model_df=self.model_df,
+ total_df=self.total_df,
+ bmd_dist=self.bmd_dist[: self.dist_numE * 2],
+ )
+
+ def to_c(self):
+ return self.Struct(
+ model=ctypes.c_int(self.model),
+ nparms=ctypes.c_int(self.nparms),
+ parms=_list_to_c([0] * self.nparms, ctypes.c_double),
+ cov=_list_to_c([0] * (self.nparms**2), ctypes.c_double),
+ dist_numE=ctypes.c_int(self.dist_numE),
+ bmd_dist=_list_to_c([0] * (self.dist_numE * 2), ctypes.c_double),
+ )
+
+
+def main():
+ path = Path("/usr/local/lib/libDRBMD.so")
+ assert path.exists() # noqa: S101
+ dll = ctypes.cdll.LoadLibrary(str(path))
+
+ doses = [0, 50, 100, 150, 200]
+ Y = [0, 5, 30, 65, 90]
+ n_group = [100, 100, 100, 100, 100]
+ prior = [1.0, 2.0, 0.0, 0.1, 2.0, 1.0, -20.0, 1e-12, 20.0, 100.0]
+ prior_cols = 5
+ parms = int(len(prior) / prior_cols)
+ da = DichotomousAnalysis(
+ model=DichModel.d_logistic.value,
+ n=len(n_group),
+ Y=Y,
+ doses=doses,
+ n_group=n_group,
+ prior=prior,
+ BMD_type=1,
+ BMR=0.1,
+ alpha=0.05,
+ degree=parms - 1,
+ samples=100,
+ burnin=20,
+ parms=parms,
+ prior_cols=prior_cols,
+ )
+ da_res = DichotomousModelResult(model=DichModel.d_logistic.value, nparms=parms, dist_numE=200)
+
+ da_struct = da.to_c()
+ da_res_struct = da_res.to_c()
+ dll.estimate_sm_laplace_dicho(ctypes.pointer(da_struct), ctypes.pointer(da_res_struct), True)
+
+ print( # noqa: T201
+ json.dumps(dict(inputs=da_struct.dict(), outputs=da_res_struct.dict()), indent=2)
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/make.bat b/make.bat
new file mode 100644
index 00000000..f644b913
--- /dev/null
+++ b/make.bat
@@ -0,0 +1,51 @@
+@ECHO off
+
+if "%~1" == "" goto :help
+if /I %1 == help goto :help
+if /I %1 == lint goto :lint
+if /I %1 == format goto :format
+if /I %1 == test goto :test
+if /I %1 == coverage goto :coverage
+if /I %1 == build goto :build
+if /I %1 == dist goto :dist
+goto :help
+
+:help
+echo.Please use `make ^<target^>` where ^<target^> is one of
+echo.  lint      check code formatting and lint with ruff
+echo.  format    format code and autofix lint issues with ruff
+echo. test run python tests
+echo. coverage generate coverage report
+echo. build rebuild in development environment
+echo. dist build wheel package for distribution
+goto :eof
+
+:lint
+ruff format . --check && ruff .
+goto :eof
+
+:format
+ruff format . && ruff . --fix --show-fixes
+goto :eof
+
+:test
+py.test
+goto :eof
+
+:coverage
+coverage run -m pytest
+coverage html
+goto :eof
+
+:build
+python setup.py develop
+stubgen -p pybmds.bmdscore -o src
+ruff format src\pybmds\bmdscore.pyi
+goto :eof
+
+:dist
+rmdir /s /q .\build
+rmdir /s /q .\dist
+python setup.py bdist_wheel
+dir .\dist
+goto :eof
diff --git a/pybind11.cmake b/pybind11.cmake
new file mode 100644
index 00000000..42745ac9
--- /dev/null
+++ b/pybind11.cmake
@@ -0,0 +1,15 @@
+include(FetchContent)
+
+set(FETCHCONTENT_QUIET FALSE)
+
+FetchContent_Declare(
+ pybind11
+ URL https://github.com/pybind/pybind11/archive/refs/tags/v2.10.4.tar.gz
+ )
+
+FetchContent_GetProperties(pybind11)
+
+if(NOT pybind11_POPULATED)
+ FetchContent_Populate(pybind11)
+ add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR})
+endif()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..4f59ed5b
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,61 @@
+[project]
+name = "pybmds"
+description = "A description of pybmds"
+requires-python = ">=3.11"
+readme = "README.md"
+authors = [
+ {name = "BMDS development team"},
+]
+dynamic = ["version"]
+dependencies = [
+ "typer >= 0.7.0",
+]
+
+license = {text = "MIT"}
+classifiers = [
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: MIT License",
+ "Natural Language :: English",
+ "Programming Language :: Python :: 3.11",
+]
+
+[tool.setuptools.packages.find]
+where = ["src"]
+include = ["pybmds"]
+
+[tool.setuptools.dynamic]
+version = {attr = "pybmds.__version__"}
+
+[project.scripts]
+pybmds = "pybmds.cli:app"
+
+[build-system]
+requires = [
+ "setuptools>=67.7.2",
+ "pybind11>=2.10.4",
+]
+build-backend = "setuptools.build_meta"
+
+[tool.cibuildwheel]
+test-requires = "pytest"
+test-command = "pytest {project}/tests"
+
+[tool.coverage.run]
+include = [
+ "src/pybmds/*"
+]
+
+[tool.pytest.ini_options]
+testpaths = "tests/test_pybmds"
+
+[tool.ruff]
+line-length = 100
+target-version = "py311"
+lint.select = ["F", "E", "W", "I", "UP", "S", "B", "T20", "RUF"]
+lint.ignore = ["E501", "B904", "B007", "S308", "S113", "S314"]
+lint.isort.known-first-party = ["pybmds"]
+
+[tool.ruff.lint.per-file-ignores]
+"test_*.py" = ["S101", "S106"]
+"setup.py" = ["S603", "S607"]
diff --git a/requirements_dev.txt b/requirements_dev.txt
new file mode 100644
index 00000000..8919cf7a
--- /dev/null
+++ b/requirements_dev.txt
@@ -0,0 +1,13 @@
+# build
+wheel~=0.43.0
+build==1.2.1
+pybind11==2.10.4
+setuptools~=69.5.1
+mypy~=1.10
+
+# tests
+coverage==7.5.1
+pytest==8.2.0
+
+# lint and formatting tools
+ruff~=0.4.3
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..fc9b4b81
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,118 @@
+import os
+import re
+import subprocess
+import sys
+from pathlib import Path
+
+from setuptools import Extension, setup
+from setuptools.command.build_ext import build_ext
+
+# Convert distutils Windows platform specifiers to CMake -A arguments
+PLAT_TO_CMAKE = {
+ "win32": "Win32",
+ "win-amd64": "x64",
+ "win-arm32": "ARM",
+ "win-arm64": "ARM64",
+}
+
+
+# A CMakeExtension needs a sourcedir instead of a file list.
+# The name must be the _single_ output extension from the CMake build.
+# If you need multiple extensions, see scikit-build.
+class CMakeExtension(Extension):
+ def __init__(self, name: str, sourcedir: str = "") -> None:
+ super().__init__(name, sources=[])
+ self.sourcedir = os.fspath(Path(sourcedir).resolve())
+
+
+class CMakeBuild(build_ext):
+ def build_extension(self, ext: CMakeExtension) -> None:
+ # Must be in this form due to bug in .resolve() only fixed in Python 3.10+
+ ext_fullpath = Path.cwd() / self.get_ext_fullpath(ext.name)
+ extdir = ext_fullpath.parent.resolve()
+
+ # Using this requires trailing slash for auto-detection & inclusion of
+ # auxiliary "native" libs
+
+ debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
+ cfg = "Debug" if debug else "Release"
+
+ # CMake lets you override the generator - we need to check this.
+ # Can be set with Conda-Build, for example.
+ cmake_generator = os.environ.get("CMAKE_GENERATOR", "")
+
+ # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
+ cmake_args = [
+ f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}{os.sep}",
+ f"-DPYTHON_EXECUTABLE={sys.executable}",
+ f"-DCMAKE_BUILD_TYPE={cfg}", # not used on MSVC, but no harm
+ ]
+ build_args = []
+ # Adding CMake arguments set as environment variable
+ # (needed e.g. to build for ARM OSx on conda-forge)
+ if "CMAKE_ARGS" in os.environ:
+ cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]
+
+ if self.compiler.compiler_type != "msvc":
+ # Using Ninja-build since it a) is available as a wheel and b)
+ # multithreads automatically. MSVC would require all variables be
+ # exported for Ninja to pick it up, which is a little tricky to do.
+ # Users can override the generator with CMAKE_GENERATOR in CMake
+ # 3.15+.
+ if not cmake_generator or cmake_generator == "Ninja":
+ try:
+ import ninja
+
+ ninja_executable_path = Path(ninja.BIN_DIR) / "ninja"
+ cmake_args += [
+ "-GNinja",
+ f"-DCMAKE_MAKE_PROGRAM:FILEPATH={ninja_executable_path}",
+ ]
+ except ImportError:
+ pass
+
+ else:
+ # Single config generators are handled "normally"
+ single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})
+
+ # CMake allows an arch-in-generator style for backward compatibility
+ contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})
+
+ # Specify the arch if using MSVC generator, but only if it doesn't
+ # contain a backward-compatibility arch spec already in the
+ # generator name.
+ if not single_config and not contains_arch:
+ cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]
+
+ # Multi-config generators have a different way to specify configs
+ if not single_config:
+ cmake_args += [f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{cfg.upper()}={extdir}"]
+ build_args += ["--config", cfg]
+
+ if sys.platform.startswith("darwin"):
+ # Cross-compile support for macOS - respect ARCHFLAGS if set
+ archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
+ if archs:
+ cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]
+
+ # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
+ # across all generators.
+ if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
+ # self.parallel is a Python 3 only way to set parallel jobs by hand
+ # using -j in the build_ext call, not supported by pip or PyPA-build.
+ if hasattr(self, "parallel") and self.parallel:
+ # CMake 3.12+ only.
+ build_args += [f"-j{self.parallel}"]
+
+ build_temp = Path(self.build_temp) / ext.name
+ if not build_temp.exists():
+ build_temp.mkdir(parents=True)
+
+ subprocess.run(["cmake", ext.sourcedir, *cmake_args], cwd=build_temp, check=True)
+ subprocess.run(["cmake", "--build", ".", *build_args], cwd=build_temp, check=True)
+
+
+setup(
+ ext_modules=[CMakeExtension("pybmds.bmdscore")],
+ cmdclass={"build_ext": CMakeBuild},
+)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100755
index 00000000..bbca2a1b
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,163 @@
+cmake_minimum_required(VERSION 3.1)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+IF(DEFINED ENV{EIGEN_DIR})
+ set(EIGEN_DIR $ENV{EIGEN_DIR})
+ENDIF()
+IF(DEFINED ENV{GSL_DIR})
+ set(GSL_DIR $ENV{GSL_DIR})
+ IF(NOT WIN32)
+ set(GSL_ROOT_DIR $ENV{GSL_DIR})
+ ENDIF()
+ENDIF()
+IF(DEFINED ENV{NLOPT_DIR})
+ set(NLOPT_DIR $ENV{NLOPT_DIR})
+ENDIF()
+IF(DEFINED ENV{CMAKE_C_COMPILER})
+ set(CMAKE_C_COMPILER $ENV{CMAKE_C_COMPILER})
+ENDIF()
+IF(DEFINED ENV{CMAKE_CXX_COMPILER})
+ set(CMAKE_CXX_COMPILER $ENV{CMAKE_CXX_COMPILER})
+ENDIF()
+
+IF(WIN32)
+ add_compile_definitions(RBMDS_EXPORTS)
+ENDIF()
+
+project(bmdscore VERSION 0.1.0)
+
+#Find libraries
+IF(WIN32)
+ find_library(GSL_LIB gsl REQUIRED HINTS ${GSL_DIR})
+ELSE()
+ find_package(GSL REQUIRED)
+ENDIF()
+find_library(NLOPT_LIB NAMES NLopt nlopt libnlopt REQUIRED HINTS ${NLOPT_DIR})
+
+# Include dir
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/code_base ${CMAKE_CURRENT_SOURCE_DIR}/include ${EIGEN_DIR} ${GSL_DIR} ${NLOPT_DIR}/../include)
+
+#lib dirs
+link_directories(${GSL_LIB_DIR} ${NLOPT_LIB_DIR})
+
+# Src
+AUX_SOURCE_DIRECTORY(code_base SRC_FILES)
+
+# Headers
+set(PROJECT_SOURCE_DIR "code_base")
+set(PROJECT_INCLUDE_DIR "include")
+
+# Source files
+set(SOURCE_FILES
+ ${PROJECT_SOURCE_DIR}/analysis_of_deviance.cpp
+ ${PROJECT_SOURCE_DIR}/analysis_of_deviance.h
+ ${PROJECT_SOURCE_DIR}/bmds_helper.cpp
+ ${PROJECT_SOURCE_DIR}/bmds_helper.h
+ ${PROJECT_SOURCE_DIR}/bmdStruct.cpp
+ ${PROJECT_SOURCE_DIR}/bmdStruct.h
+ ${PROJECT_SOURCE_DIR}/continuous_clean_aux.cpp
+ ${PROJECT_SOURCE_DIR}/continuous_clean_aux.h
+ ${PROJECT_SOURCE_DIR}/continuous_entry_code.cpp
+ ${PROJECT_SOURCE_DIR}/continuous_entry_code.h
+ ${PROJECT_SOURCE_DIR}/continuous_model_functions.cpp
+ ${PROJECT_SOURCE_DIR}/continuous_model_functions.h
+ ${PROJECT_SOURCE_DIR}/DichGammaBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/DichLogisticBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/DichLogProbitBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/dichotomous_entry_code.cpp
+ ${PROJECT_SOURCE_DIR}/dichotomous_entry_code.h
+ ${PROJECT_SOURCE_DIR}/DichProbitBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/DichQlinearBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/DichWeibullBMD_NC.cpp
+ ${PROJECT_SOURCE_DIR}/framework.h
+ ${PROJECT_SOURCE_DIR}/gradient.cpp
+ ${PROJECT_SOURCE_DIR}/gradient.h
+ ${PROJECT_SOURCE_DIR}/helperfunctions.cpp
+ ${PROJECT_SOURCE_DIR}/IDPrior.cpp
+ ${PROJECT_SOURCE_DIR}/IDPriorMCMC.cpp
+ ${PROJECT_SOURCE_DIR}/list_r_conversion.h
+ ${PROJECT_SOURCE_DIR}/lognormal_EXP_NC.cpp
+ ${PROJECT_SOURCE_DIR}/lognormal_HILL_NC.cpp
+ ${PROJECT_SOURCE_DIR}/lognormal_POLYNOMIAL_NC.cpp
+ ${PROJECT_SOURCE_DIR}/lognormal_POWER_NC.cpp
+ ${PROJECT_SOURCE_DIR}/lognormalModels.cpp
+ ${PROJECT_SOURCE_DIR}/normal_EXP_NC.cpp
+ ${PROJECT_SOURCE_DIR}/normal_FUNL_NC.cpp
+ ${PROJECT_SOURCE_DIR}/normal_HILL_NC.cpp
+ ${PROJECT_SOURCE_DIR}/normal_likelihoods.cpp
+ ${PROJECT_SOURCE_DIR}/normal_POLYNOMIAL_NC.cpp
+ ${PROJECT_SOURCE_DIR}/normal_POWER_NC.cpp
+ ${PROJECT_SOURCE_DIR}/normalModels.cpp
+# ${PROJECT_SOURCE_DIR}/pch.cpp
+# ${PROJECT_SOURCE_DIR}/pch.h
+ ${PROJECT_SOURCE_DIR}/stdafx.cpp
+ ${PROJECT_SOURCE_DIR}/stdafx.h
+ ${PROJECT_SOURCE_DIR}/targetver.h
+ ${PROJECT_INCLUDE_DIR}/binomialTests.h
+ ${PROJECT_INCLUDE_DIR}/binomModels.h
+ ${PROJECT_INCLUDE_DIR}/bmd_calculate.h
+ ${PROJECT_INCLUDE_DIR}/bmds_entry.h
+ ${PROJECT_INCLUDE_DIR}/cBMDstatmod.h
+ ${PROJECT_INCLUDE_DIR}/cmodeldefs.h
+ ${PROJECT_INCLUDE_DIR}/dBMDstatmod.h
+ ${PROJECT_INCLUDE_DIR}/DichGammaBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichHillBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichLogisticBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichLogLogisticBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichLogProbitBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichMultistageBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichProbitBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichQlinearBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/DichWeibullBMD_NC.h
+ ${PROJECT_INCLUDE_DIR}/IDPrior.h
+ ${PROJECT_INCLUDE_DIR}/IDPriorMCMC.h
+ ${PROJECT_INCLUDE_DIR}/log_likelihoods.h
+ ${PROJECT_INCLUDE_DIR}/lognormal_EXP_NC.h
+ ${PROJECT_INCLUDE_DIR}/lognormal_HILL_NC.h
+ ${PROJECT_INCLUDE_DIR}/lognormal_likelihoods.h
+ ${PROJECT_INCLUDE_DIR}/lognormal_POLYNOMIAL_NC.h
+ ${PROJECT_INCLUDE_DIR}/lognormal_POWER_NC.h
+ ${PROJECT_INCLUDE_DIR}/lognormalModels.h
+ ${PROJECT_INCLUDE_DIR}/lognormalTests.h
+ ${PROJECT_INCLUDE_DIR}/mcmc_anal.h
+ ${PROJECT_INCLUDE_DIR}/mcmc_analysis.h
+ ${PROJECT_INCLUDE_DIR}/mcmc_struct.h
+ ${PROJECT_INCLUDE_DIR}/normal_EXP_NC.h
+ ${PROJECT_INCLUDE_DIR}/normal_FUNL_NC.h
+ ${PROJECT_INCLUDE_DIR}/normal_HILL_NC.h
+ ${PROJECT_INCLUDE_DIR}/normal_likelihoods.h
+ ${PROJECT_INCLUDE_DIR}/normal_POLYNOMIAL_NC.h
+ ${PROJECT_INCLUDE_DIR}/normal_POWER_NC.h
+ ${PROJECT_INCLUDE_DIR}/normalModels.h
+ ${PROJECT_INCLUDE_DIR}/statisticalmodel.h
+ ${PROJECT_INCLUDE_DIR}/statmod.h
+ )
+
+# Set up such that XCode organizes the files correctly
+source_group(TREE ${CMAKE_CURRENT_SOURCE_DIR} FILES ${SOURCE_FILES})
+
+# Add library
+IF(WIN32)
+ #Windows build
+ add_library(bmdscore SHARED ${SOURCE_FILES})
+ target_link_libraries(bmdscore PRIVATE ${GSL_LIB} ${NLOPT_LIB})
+ELSE()
+ #Linux or MacOS
+ add_library(bmdscore STATIC ${SOURCE_FILES})
+ target_link_libraries(bmdscore PRIVATE GSL::gsl GSL::gslcblas ${NLOPT_LIB})
+ENDIF(WIN32)
+
+# Include directories
+target_include_directories(bmdscore PRIVATE include)
+
+# Install
+#install(TARGETS bmdscore DESTINATION )
+
+# Install the headers
+#install(FILES include/bmdscore DESTINATION include)
+
+# Create base directory
+#install(DIRECTORY include DESTINATION include)
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 00000000..b214a851
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,33 @@
+#USE_SIMD = 0 NO VECTOR INSTRUCTIONS -DUSE_SIMD=1
+#USE_SIMD = 1 USE AVX/AVX2 INSTRUCTIONS
+#USE_SIMD = 2 USE AVX2512 INSTRUCTIONS-DUSE_SIMD=1 -DNDEBUG -DEIGEN_NO_DEBUG -DEIGEN_NO_DEBUG
+ #-fopenmp
+
+DISTRO = $(shell uname)
+
+#old emvl
+#PKG_CPPFLAGS = -I"./code_base" -I"./include" -O3 -funroll-loops -std=c++11 -mavx2 -ftree-vectorize
+#PKG_CPPFLAGS += -DR_COMPILATION=TRUE -Wno-ignored-attributes -mtune=native -flto @OPENMP_CFLAGS@
+
+#new wheeler
+ifeq ($(DISTRO), Linux)
+ PKG_CPPFLAGS = -I"./include" -I"./code_base" -ftree-vectorize -Os -march=native
+ PKG_CPPFLAGS += $(shell pkg-config --cflags nlopt gsl) -DR_COMPILATION -Wno-ignored-attributes -flto -fopenmp
+ PKG_LIBS = $(shell pkg-config --libs nlopt gsl)
+else
+ PKG_CPPFLAGS = -I"./code_base" -I"./include" -ftree-vectorize -Os -fopenmp
+ PKG_CPPFLAGS += -DR_COMPILATION -Wno-ignored-attributes -DNDEBUG $(shell pkg-config --cflags nlopt gsl) #-fopenmp
+# PKG_LIBS = -lomp -Wl, -Bstatic $(shell pkg-config --libs nlopt gsl)
+ PKG_LIBS = -lomp $(shell pkg-config --libs nlopt gsl)
+endif
+
+#MAKEFLAGS = -j4
+
+
+POLYK = $(wildcard polyK/*.cpp)
+MAIN = $(wildcard *.cpp)
+MAIN_CODE = $(wildcard code_base/*.cpp)
+OBJECTS = $(MAIN:.cpp=.o) $(MAIN_CODE:.cpp=.o) $(POLYK:.cpp=.o)
+
+
+
diff --git a/src/code_base/DichGammaBMD_NC.cpp b/src/code_base/DichGammaBMD_NC.cpp
index aaf17724..efe620c1 100644
--- a/src/code_base/DichGammaBMD_NC.cpp
+++ b/src/code_base/DichGammaBMD_NC.cpp
@@ -1,4 +1,4 @@
-#include
+#include "DichGammaBMD_NC.h"
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/DichLogisticBMD_NC.cpp b/src/code_base/DichLogisticBMD_NC.cpp
index d460efdb..3d21f638 100644
--- a/src/code_base/DichLogisticBMD_NC.cpp
+++ b/src/code_base/DichLogisticBMD_NC.cpp
@@ -1,4 +1,4 @@
-#include
+#include "DichLogisticBMD_NC.h"
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/DichProbitBMD_NC.cpp b/src/code_base/DichProbitBMD_NC.cpp
index e4a724ec..4d341692 100644
--- a/src/code_base/DichProbitBMD_NC.cpp
+++ b/src/code_base/DichProbitBMD_NC.cpp
@@ -1,4 +1,4 @@
-#include
+#include "DichProbitBMD_NC.h"
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/DichQlinearBMD_NC.cpp b/src/code_base/DichQlinearBMD_NC.cpp
index 600aa326..4fc1bd7a 100644
--- a/src/code_base/DichQlinearBMD_NC.cpp
+++ b/src/code_base/DichQlinearBMD_NC.cpp
@@ -1,4 +1,4 @@
-#include
+#include "DichQlinearBMD_NC.h"
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/DichWeibullBMD_NC.cpp b/src/code_base/DichWeibullBMD_NC.cpp
index a03cffab..fbc7c8f8 100644
--- a/src/code_base/DichWeibullBMD_NC.cpp
+++ b/src/code_base/DichWeibullBMD_NC.cpp
@@ -1,4 +1,4 @@
-#include
+#include "DichWeibullBMD_NC.h"
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/IDPrior.cpp b/src/code_base/IDPrior.cpp
index a533c81f..777fcfa0 100644
--- a/src/code_base/IDPrior.cpp
+++ b/src/code_base/IDPrior.cpp
@@ -1,4 +1,8 @@
-#include "stdafx.h" // Precompiled header - does nothing if building R version
+#ifdef WIN32
+ #include "pch.h"
+#else
+ #include "stdafx.h"
+#endif // Precompiled header - does nothing if building R version
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/IDPriorMCMC.cpp b/src/code_base/IDPriorMCMC.cpp
index 17f654cd..18e96c19 100644
--- a/src/code_base/IDPriorMCMC.cpp
+++ b/src/code_base/IDPriorMCMC.cpp
@@ -1,4 +1,8 @@
-#include "stdafx.h" // Precompiled header - does nothing if building R version
+#ifdef WIN32
+ #include "pch.h"
+#else
+ #include "stdafx.h"
+#endif // Precompiled header - does nothing if building R version
#ifdef R_COMPILATION
//necessary things to run in R
diff --git a/src/code_base/Makefile.am b/src/code_base/Makefile.am
index e66086bf..018455ba 100644
--- a/src/code_base/Makefile.am
+++ b/src/code_base/Makefile.am
@@ -2,9 +2,9 @@ noinst_LTLIBRARIES = libBMDS.la
AM_CPPFLAGS= -I$(top_srcdir)/code_base -I$(top_srcdir)/include -I$(EIGEN_INCLUDE)
AM_CPPFLAGS+= -O3 -funroll-loops -std=c++11
-AM_CPPFLAGS+= -fopenmp -Wno-ignored-attributes -DEIGEN_NO_DEBUG -march=native
+AM_CPPFLAGS+= @OPENMP_CFLAGS@ -Wno-ignored-attributes -DEIGEN_NO_DEBUG -march=native
-libBMDS_la_SOURCES= DichLogProbitBMD_NC.cpp lognormal_HILL_NC.cpp \
+libBMDS_la_SOURCES= bmds_helper.cpp analysis_of_deviance.cpp DichLogProbitBMD_NC.cpp lognormal_HILL_NC.cpp \
bmdStruct.cpp dichotomous_entry_code.cpp lognormalModels.cpp \
DichProbitBMD_NC.cpp lognormal_POLYNOMIAL_NC.cpp \
DichQlinearBMD_NC.cpp lognormal_POWER_NC.cpp \
@@ -12,7 +12,7 @@ libBMDS_la_SOURCES= DichLogProbitBMD_NC.cpp lognormal_HILL_NC.cpp \
continuous_clean_aux.cpp normal_HILL_NC.cpp normal_FUNL_NC.cpp\
continuous_entry_code.cpp gradient.cpp normalModels.cpp \
continuous_model_functions.cpp helperfunctions.cpp normal_POLYNOMIAL_NC.cpp \
- DichGammaBMD_NC.cpp IDPrior.cpp normal_POWER_NC.cpp \
+ DichGammaBMD_NC.cpp IDPrior.cpp IDPriorMCMC.cpp normal_POWER_NC.cpp \
DichLogisticBMD_NC.cpp lognormal_EXP_NC.cpp normal_likelihoods.cpp stdafx.cpp
diff --git a/src/code_base/analysis_of_deviance.cpp b/src/code_base/analysis_of_deviance.cpp
index 022d1e84..4f2d3bf7 100644
--- a/src/code_base/analysis_of_deviance.cpp
+++ b/src/code_base/analysis_of_deviance.cpp
@@ -19,6 +19,11 @@
*
*
*/
+#ifdef WIN32
+ #include "pch.h"
+#else
+ #include "stdafx.h"
+#endif
#include "bmd_calculate.h"
#include "bmdStruct.h"
diff --git a/src/code_base/bmdStruct.h b/src/code_base/bmdStruct.h
index e5263d54..0410aa1c 100644
--- a/src/code_base/bmdStruct.h
+++ b/src/code_base/bmdStruct.h
@@ -25,8 +25,22 @@
#ifndef BMD_ANALYSIS_h
#define BMD_ANALYSIS_h
+#ifndef M_PI
+#define M_PI (3.14159265358979323845)
+#endif
+
+#ifdef __cplusplus
+#include
+#endif
+
#include "cmodeldefs.h"
+#ifdef _WIN64
+#pragma pack(8)
+#elif _WIN32
+#pragma pack(4)
+#endif
+
// Dichotomous Structures
//
// dichotomous_analysis:
@@ -223,6 +237,9 @@ struct continuous_deviance{
int NR;
};
+#ifdef _WIN32
+#pragma pack()
+#endif
// odds and ends
struct bmd_analysis_MCMC * new_mcmc_analysis(int model,
diff --git a/src/code_base/bmds_helper.cpp b/src/code_base/bmds_helper.cpp
new file mode 100644
index 00000000..43c4e8cb
--- /dev/null
+++ b/src/code_base/bmds_helper.cpp
@@ -0,0 +1,3826 @@
+#ifdef WIN32
+ #include "pch.h"
+#else
+ #include "stdafx.h"
+#endif
+#include
+#include
+#include
+//#include
+#include
+#include "bmds_helper.h"
+#include "analysis_of_deviance.h"
+
+// calendar versioning; see https://peps.python.org/pep-0440/#pre-releases
+std::string BMDS_VERSION = "2023.10a1";
+
+
+double python_dichotomous_model_result::getSRAtDose(double targetDose, std::vector doses){
+ std::vector diff;
+ double absDiff = DBL_MAX;
+ double srVal = BMDS_MISSING;
+ if(!bmdsRes.validResult || doses.size() != gof.residual.size()){
+ return BMDS_MISSING;
+ }
+ for (int i=0; ibounded.clear();
+ for (int i=0; ibounded.push_back(false);
+ //5*i+4 is location of min in prior array
+ //5*i+5 is location of max in prior array
+ if (fabs(parms[i]-lowerBound[i]) < BMDS_EPS || fabs(parms[i]-upperBound[i]) < BMDS_EPS){
+ bounded++;
+ BMDSres->bounded[i] = true;
+ }
+ }
+ return bounded;
+}
+
+
+void calcDichoAIC(struct dichotomous_analysis *anal, struct dichotomous_model_result *res, struct BMDS_results *BMDSres, double estParmCount){
+
+ bool freqModel = anal->prior[0] == 0;
+
+// // First find number of bounded parms
+ //int bounded = checkForBoundedParms(anal->parms, res->parms, anal->prior, BMDSres);
+
+ double* lowerBound = (double*)malloc(anal->parms * sizeof(double));
+ double* upperBound = (double*)malloc(anal->parms * sizeof(double));
+
+ //copy prior values
+ for (int i=0; iparms; i++){
+ lowerBound[i] = anal->prior[3*anal->parms+i];
+ upperBound[i] = anal->prior[4*anal->parms+i];
+ }
+
+ //scale priors as needed
+ rescale_dichoParms(anal->model, lowerBound);
+ rescale_dichoParms(anal->model, upperBound);
+
+ int bounded = checkForBoundedParms(anal->parms, res->parms, lowerBound, upperBound, BMDSres);
+
+ estParmCount = anal->parms - bounded;
+
+ //if freq then model_df should be rounded to nearest whole number
+ if (freqModel)
+ estParmCount = round(estParmCount);
+ BMDSres->AIC = 2*(res->max + estParmCount);
+
+ free(lowerBound);
+ free(upperBound);
+}
+
+void calcContAIC(struct continuous_analysis *anal, struct continuous_model_result *res, struct BMDS_results *BMDSres){
+
+ bool freqModel = anal->prior[0] == 0;
+
+ // First find number of bounded parms
+ double* lowerBound = (double*)malloc(anal->parms * sizeof(double));
+ double* upperBound = (double*)malloc(anal->parms * sizeof(double));
+
+ //copy prior values
+ for (int i=0; iparms; i++){
+ lowerBound[i] = anal->prior[3*anal->parms+i];
+ upperBound[i] = anal->prior[4*anal->parms+i];
+ }
+ //scale priors as needed
+ rescale_contParms(anal, lowerBound);
+ rescale_contParms(anal, upperBound);
+
+ if (anal->model == cont_model::exp_3){
+ for (int i=2; iparms-1; i++){
+ lowerBound[i] = lowerBound[i+1];
+ upperBound[i] = upperBound[i+1];
+ }
+ }
+ int bounded = checkForBoundedParms(res->nparms, res->parms, lowerBound, upperBound, BMDSres);
+ //int bounded = checkForBoundedParms(anal->parms, res->parms, lowerBound, upperBound, BMDSres);
+
+ double estParmCount = res->model_df - bounded;
+ //if freq then model_df should be rounded to nearest whole number
+ if (freqModel)
+ estParmCount = round(estParmCount);
+
+ BMDSres->AIC = 2*(res->max + estParmCount);
+
+ // // BMDSres->AIC = -9998.0;
+ free(lowerBound);
+ free(upperBound);
+
+}
+
+double findQuantileVals(double *quant, double *val, int arrSize, double target){
+
+ double retVal = BMDS_MISSING;
+
+ for (int i=0; i < arrSize; i++){
+ if (fabs(quant[i] - target) < BMDS_EPS && std::isfinite(val[i]) ){
+ //exact match
+ retVal = val[i];
+ break;
+ } else if (quant[i] > target && i>0){
+ // linear interpolation
+ retVal = val[i-1] + ((val[i] - val[i-1])/(quant[i] - quant[i-1])) * (target - quant[i-1]);
+ break;
+ }
+ }
+ return retVal;
+}
+
+void collect_dicho_bmd_values(struct dichotomous_analysis *anal, struct dichotomous_model_result *res, struct BMDS_results *BMDSres, double estParmCount){
+
+ int distSize = res->dist_numE*2;
+
+ double* quant = (double*)malloc(distSize / 2 * sizeof(double));
+ double* val = (double*)malloc(distSize / 2 * sizeof(double));
+
+ for (int i = 0; i < distSize/2; i++){
+ val[i] = res->bmd_dist[i];
+ }
+ for (int i = distSize/2; i < distSize; i++){
+ quant[i-distSize/2] = res->bmd_dist[i];
+ }
+
+ calcDichoAIC(anal, res, BMDSres, estParmCount);
+ BMDSres->BMD = findQuantileVals(quant, val, distSize/2, 0.50);
+ BMDSres->BMDL = findQuantileVals(quant, val, distSize/2, anal->alpha);
+ BMDSres->BMDU = findQuantileVals(quant, val, distSize/2, 1.0-anal->alpha);
+
+ free(quant);
+ free(val);
+
+}
+
+
+void collect_dichoMA_bmd_values(struct dichotomousMA_analysis *anal, struct dichotomousMA_result *res, struct BMDSMA_results *BMDSres, double alpha){
+
+ int distSize = res->dist_numE*2;
+
+ double* quantMA = (double*)malloc(distSize / 2 * sizeof(double));
+ double* valMA = (double*)malloc(distSize / 2 * sizeof(double));
+
+ for (int i = 0; i < distSize/2; i++){
+ valMA[i] = res->bmd_dist[i];
+ }
+ for (int i = distSize/2; i < distSize; i++){
+ quantMA[i-distSize/2] = res->bmd_dist[i];
+ }
+
+// calculate MA quantiles
+ BMDSres->BMD_MA = findQuantileVals(quantMA, valMA, distSize/2, 0.50);
+ BMDSres->BMDL_MA = findQuantileVals(quantMA, valMA, distSize/2, alpha);
+ BMDSres->BMDU_MA = findQuantileVals(quantMA, valMA, distSize/2, 1.0-alpha);
+
+// calculate individual model quantiles
+ for (int j=0; jnmodels; j++){
+ for (int i = 0; i < distSize/2; i++){
+ valMA[i] = res->models[j]->bmd_dist[i];
+ }
+ for (int i = distSize/2; i < distSize; i++){
+ quantMA[i-distSize/2] = res->models[j]->bmd_dist[i];
+ }
+ BMDSres->BMD[j] = findQuantileVals(quantMA, valMA, distSize/2, 0.50);
+ BMDSres->BMDL[j] = findQuantileVals(quantMA, valMA, distSize/2, alpha);
+ BMDSres->BMDU[j] = findQuantileVals(quantMA, valMA, distSize/2, 1.0-alpha);
+ }
+ free(quantMA);
+ free(valMA);
+}
+
+void collect_cont_bmd_values(struct continuous_analysis *anal, struct continuous_model_result *res, struct BMDS_results *BMDSres){
+
+ int distSize = res->dist_numE*2;
+
+ double* contVal = (double*)malloc(distSize / 2 * sizeof(double));
+ double* contQuant = (double*)malloc(distSize / 2 * sizeof(double));
+
+ for (int i = 0; i < distSize/2; i++){
+ contVal[i] = res->bmd_dist[i];
+ }
+
+ for (int i = distSize/2; i < distSize; i++){
+ contQuant[i-distSize/2] = res->bmd_dist[i];
+ }
+
+ calcContAIC(anal, res, BMDSres);
+ BMDSres->BMD = findQuantileVals(contQuant, contVal, distSize/2, 0.50);
+ BMDSres->BMDL = findQuantileVals(contQuant, contVal, distSize/2, anal->alpha);
+ BMDSres->BMDU = findQuantileVals(contQuant, contVal, distSize/2, 1.0-anal->alpha);
+
+ free(contVal);
+ free(contQuant);
+}
+
+/*****************************************************************
+ * DLgama - double log gamma
+ * input:
+ * X - double value
+ * Returns log(z-1)!
+ *************************/
+double DLgamma(double x){
+
+ double r, az, z, y, dl;
+ if (x==0) return 1;
+ z = x;
+ r = 12.0;
+ az = 1.0;
+ for (;z-r < 0; z++) az = az * z;
+ y = z * z;
+ dl=0.91893853320467274-z+(z-0.5)*log(z)+z*
+ (0.08333333333333333*y+0.0648322851140734)/
+ (y*(y+0.811320754825416)+0.01752021563249146);
+ dl = dl - log(az);
+ return dl ;
+
+}
+
+//calculate dichotomous log likelihood constant
+double LogLik_Constant(std::vector Y, std::vector n_group){
+
+ double X, N, NX, con, xf, nf, nxf, val;
+
+ con = 0;
+ for (int i=0; i= tol_act /* If prev_step was large enough*/
+ && fabs(fa) > fabs(fb) ) /* and was in true direction, */
+ { /* Interpolatiom may be tried */
+// std::cout<<"trying interpolation"<(double)0 ) /* p was calculated with the op-*/
+ q = -q; /* posite sign; make p positive */
+ else /* and assign possible minus to */
+ p = -p; /* q */
+
+ if( p < (0.75*cb*q-fabs(tol_act*q)/2) /* If b+p/q falls in [b,c]*/
+ && p < fabs(prev_step*q/2) ) /* and isn't too large */
+ new_step = p/q; /* it is accepted */
+ /* If p/q is too large then the */
+ /* bissection procedure can */
+ /* reduce [b,c] range to more */
+ /* extent */
+ }
+
+ if( fabs(new_step) < tol_act ) /* Adjust the step to be not less*/
+ {
+// std::cout<<"adjusting step"< (double)0 ) /* than tolerance */
+ new_step = tol_act;
+ else
+ new_step = -tol_act;
+ }
+
+ a = b; fa = fb; /* Save the previous approx. */
+ b += new_step;
+// printf("2nd fb calc\n");
+ fb = (*f)(nparm-1, Parms, b, ck); /* Do step to a new approxim. */
+ if( (fb > 0 && fc > 0) || (fb < 0 && fc < 0) )
+ { /* Adjust c for it to have a sign*/
+ c = a; fc = fa; /* opposite to that of b */
+ }
+// std::cout<<"a="<0; j--) {
+// printf("j=%d, poly b4=%g\n", j, poly);
+ poly=poly*D+p[j];
+// printf("poly after=%g\n", poly);
+ }
+ poly = poly*D;
+// printf("final poly=%g\n",poly);
+// printf("ck = %g\n", ck);
+ fx = poly - ck; /* ck = log(1-A) */
+ return fx;
+}
+
+
+double getclmt(python_multitumor_analysis *pyAnal, python_multitumor_result *pyRes, double Dose, double target, double maxDose, std::vector xParms, bool isBMDL){
+
+ int nT = pyRes->selectedModelIndex.size();
+ double bmr = pyAnal->BMR;
+ std::vector degree;
+ for (int j=0; jmodels[j][pyRes->selectedModelIndex[j]].degree;
+ degree.push_back(modDeg);
+ }
+ int N = xParms.size() + 1;
+ double bmd = Dose;
+ std::vector x(N);
+ int iOffset = 0;
+ x[0] = log(bmd);
+
+ std::vector> tmp2;
+ //restructure to group like terms together
+ int count = 0;
+ for (int i=0; i theParms;
+ for (int j=0; j<=degree[i]; j++){
+ theParms.push_back(xParms[count]);
+ count++;
+ }
+ tmp2.push_back(theParms);
+ }
+
+ int maxDegree = *max_element(degree.begin(), degree.end());
+ count = 1;
+ for (int j=0; j<=maxDegree; j++){
+ for (int i=0; i lb(x.size());
+ std::vector ub(x.size());
+
+ //calculate the values that will correspond to '0' and 'infinity' in BMD calcs
+ double lminbmd = log(DBL_MIN) - log(maxDose);
+ double lmaxbmd = log(DBL_MAX) - log(maxDose);
+
+ lb[0] = lminbmd; //BMD lower limit
+ for (int i=1; iselectedModelIndex.size(); i++){
+ int selIndex = pyRes->selectedModelIndex[i];
+ std::vector scaledDose = pyAnal->models[i][selIndex].doses;
+ for (int j=0; jmodels[i][selIndex].Y);
+ ineq1.n_group.push_back(pyAnal->models[i][selIndex].n_group);
+ }
+ ineq1.nObs = pyAnal->n;
+ ineq1.degree = degree;
+
+
+ nlopt::opt opt(nlopt::LD_SLSQP, x.size());
+ if (isBMDL){
+ opt.set_min_objective(objfunc_bmdl, NULL);
+ } else {
+ opt.set_min_objective(objfunc_bmdu, NULL);
+ }
+ opt.add_equality_constraint(myEqualityConstraint, &eq1, 1e-8);
+ opt.add_inequality_constraint(myInequalityConstraint1, &ineq1, 1e-8);
+ opt.set_xtol_rel(1e-8);
+ opt.set_maxeval(1000000);
+ opt.set_lower_bounds(lb);
+ opt.set_upper_bounds(ub);
+
+ double minf, val;
+ nlopt::result result = nlopt::FAILURE;
+ try{
+ result = opt.optimize(x, minf);
+// std::cout << "found minimum at f(" << x[0] << ") = " << std::setprecision(10) << minf << std::endl;
+ } catch (std::exception &e){
+ std::cout << "nlopt failed: " << e.what() << std::endl;
+ }
+
+ val = x[0];
+
+
+ return val;
+
+}
+
+
+/*****************************************************************
+ * Added by CVL 7/2007 - start block
+ * BMDL_combofunc -- returns the lower confidence limit, BMDL.
+ * external: Spec[]
+ * input:
+ * nparm is the number of parameters for Model B
+ * Anparm is the number of parameters for Model A
+ * xlk is the log-likelihood of the fitted model
+ * Dose is the BMD or upper dose limit
+ * pBak[] is the vector of fitted parameters for Model B
+ * pABak[] is the vector of fitted parameters for Model A
+ * D is a lower dose limit
+ * gtol is a small positive number (tolerance)
+ * output: lower confidence limit
+ *****************************************************************/
+//TODO: combine with BMDU_combofunc
+double BMDL_combofunc(struct python_multitumor_analysis *pyAnal, struct python_multitumor_result *pyRes, double Dose, double D, double LR, double gtol, int *is_zero)
+{ /* ck and LR are calculated in Multistage_ComboBMD() */
+
+ int optite, nresm, *bind, CBnparm;
+ int which, temprisk, lnParmMax;
+ double fD, bmdl, target, xmax, xlk2, xlk3, crisk;
+ int i, j, nCall, k, nParmMax, ii = 0;
+ double scale;
+
+// std::cout<<"inside BMDL_combofunc"< adxmax;
+ int nParms;
+ fD = bmdl = target = xmax = xlk2 = xlk3 = crisk = 0.0;
+ optite = -5;
+ nCall = 1;
+
+ temprisk = 1; //based on bmdparm.risk set to zero in original Multistage_ComboBMD
+
+ adxmax.resize(pyRes->ndatasets, 0.0);
+
+
+
+ CBnparm = lnParmMax = 0;
+ for(i = 0; i < pyRes->ndatasets; i++)
+ {
+ int selModelIndex = pyRes->selectedModelIndex[i];
+ struct python_dichotomous_analysis mod = pyAnal->models[i][selModelIndex];
+ struct python_dichotomous_model_result modRes = pyRes->models[i][selModelIndex];
+ xmax = mod.doses[0];
+ CBnparm = CBnparm + modRes.nparms;
+
+ if(modRes.nparms > lnParmMax){
+ lnParmMax = modRes.nparms;
+ }
+
+// std::cout<<"\n\nIn BMDL_combofunc, Tumor "< xmax){
+ xmax = mod.doses[j];
+ }
+// std::cout<ndatasets; i++)
+ {
+ if(adxmax[i] > xmax) xmax = adxmax[i];
+ }
+ scale = xmax;
+
+ nParmMax = (int)lnParmMax;
+ nParms = pyRes->ndatasets*nParmMax;
+ std::vector pdParms(nParms, 0.0);
+ std::vector pdParmsBak(nParms, 0.0);
+ std::vector pdParms2(nParms, 0.0);
+ std::vector pdVals(nParms, 0.0);
+ std::vector piSpec2(nParms, 0.0);
+
+
+// std::cout<<"\nIn BMDL_combofunc, pdParms[j](MLEs)"<ndatasets; i++)
+// {
+// std::cout<<"Tumor "<"<models[i][0].nparms; j++)
+// {
+// std::cout<models[i][0].parms[j]<<"\t";
+// }
+// std::cout<ndatasets-1; i>=0; i--)
+ {
+ k++;
+ if(jmodels[i][0].nparms)
+ {
+ pdParms[k] = pyRes->models[i][0].parms[j];
+ piSpec2[k] = 0.0; //no user specified values
+ }
+ }
+ }
+
+// std::cout<<"\n\nIn BMDL_combofunc, pdParms values (MLEs, k="<ndatasets; j++){
+// std::cout<<" Tumor "<ndatasets; j++){
+// std::cout<ndatasets; i++)
+// {
+// std::cout<<"Tumor "< "<combined_LL<ndatasets-1; i>=0; i--)
+ {
+ int iParms = pyRes->models[i][0].nparms;
+
+ k++;
+ if (j models[i][0].parms[j];
+ pdParms[k] = pyRes->models[i][0].parms[j]*(pow(scale,(j)));
+ } else {
+ pdParmsBak[k] = pdParms[k] = BMDS_MISSING;
+ }
+ }
+ }
+// /* One more step for the background terms */
+ for(i=0; indatasets; i++){
+ pdParms[i] = -log(1.0 - pdParms[i]);
+ }
+// std::cout<<"\n\nValues BEFORE call "<BMR<<" target="<ndatasets; j++){
+// std::cout<<"Scaled | Unscaled\t\t";
+// }
+// std::cout<ndatasets; j++){
+// std::cout<= 0, it is successful, and we can stop */
+// if(optite >= 0)
+// break;
+//#ifdef MISC_OUT
+// /* otherwise, issues another warning, and continue trying */
+// else
+// fprintf(fp_out, "**** WARNING %d: Completion code = %d trying new start****\n\n", ii, optite);
+//#endif
+// } /* end for */
+//
+// } /* end: if (optite < 0) */
+//
+// if(optite < 0 )
+// {
+//#ifdef MISC_OUT
+// /* Warn user */
+// fprintf(fp_out, "**** WARNING: Completion code = %d. Optimum not found. Trying new starting point****\n\n", optite);
+//#endif
+// /* Try up to 10 times if needed */
+// for(ii = 0; ii < 10; ii++)
+// {
+// /* Get original values */
+// k = -1;
+// for(i = 1; i <= nT; i++)
+// {
+// for(j = 1; j <= nParmMax; j++)
+// {
+// k++;
+// pdParms[k] = aParmList[i].pdParms[j] * (pow(scale,(j-1)));;
+// }
+// }
+// GetMoreParms2(pdParms, nParmMax); /* Get a new starting point */
+//
+// /* again, reparameterize p[0] */
+// for(i = 0; i < nT; i++)
+// {
+// pdParms[i] = -log(1-aParmList[i+1].pdParms[1]);
+// }
+//
+// /* Try again */
+// fprintf(fp_log,"\n\n\nValues BEFORE call %d to getclmt_()", nCall);
+// fprintf(fp_log,"BMR=%10.5g target=%10.5g\n",BMR, target);
+// fprintf(fp_log,"bmdl=%10.5g optite=%d", bmdl, optite);
+// fprintf(fp_log,"\n");
+// for(j = 1; j<=nT; j++)
+// fprintf(fp_log," Tumor %d\t", j);
+// fprintf(fp_log,"\n");
+// i = 0;
+// for(k = 0; k < nParmMax; k++)
+// {
+// for(j = 1; j <= nT; j++)
+// fprintf(fp_log,"%10.5g\t", pdParms[i++]);
+// fprintf(fp_log,"\n");
+// }
+// fflush(fp_log);
+//
+// getclmt_(&which, &lnParmMax, &BMR, &Dose,
+// &target, pdParms, piSpec2,
+// pdParms, &temprisk, &bmdl,
+// pdParms2, &optite, &nresm,
+// bind, is_zero);
+//
+// fprintf(fp_log,"\n\nValues AFTER call %d to getclmt_()", nCall);
+// fprintf(fp_log,"BMR=%10.5g target=%10.5g\n",BMR, target);
+// fprintf(fp_log,"bmdl=%10.5g optite=%d", bmdl, optite);
+// fprintf(fp_log,"\n");
+// for(j = 1; j<=nT; j++)
+// fprintf(fp_log," Tumor %d\t", j);
+// fprintf(fp_log,"\n");
+// i = 0;
+// for(k = 0; k < nParmMax; k++)
+// {
+// for(j = 1; j <= nT; j++)
+// fprintf(fp_log,"%10.5g\t", pdParms[i++]);
+// fprintf(fp_log,"\n");
+// }
+// nCall++;
+// fflush(fp_log);
+//
+// /* if optite >= 0, it is successful, and we can stop */
+// if(optite >= 0)
+// break;
+//#ifdef MISC_OUT
+// /* otherwise, issues another warning, and continue trying */
+// else
+// fprintf(fp_out, "**** WARNING %d: Completion code = %d trying new start****\n\n", ii, optite);
+//#endif
+// } /* end for */
+//
+// } /* end: if (optite < 0) */
+//
+//
+// /* Let user know if no optimum was found */
+// if(ii == 10)
+// {
+//#ifdef MISC_OUT
+// fprintf(fp_out, "\nWarning: completion code still negative");
+//#endif
+// fprintf(fp_out, "\nBMDL did not converge for BMR = %f\n", BMR);
+// bmdl_bmr_flag = 1;
+// } /* end if */
+
+
+
+// std::cout<<"Here after bmdl calc"<ndatasets; i++){
+ nT++;
+ }
+ std::vector> ppdParms(nT, std::vector (nParmMax, BMDS_MISSING));
+// std::cout<<"********** pdParms2 Values **********"<ndatasets; j++){
+// std::cout<<" Tumor " << j << "\t";
+// }
+// std::cout<ndatasets; i++)
+ {
+ k++;
+// std::cout<models[i][0].nparms)
+ {
+ ppdParms[i][j] = pdParms2[k];
+ if(k < pyAnal->ndatasets)
+ {
+ ppdParms[i][j] = 1-exp(-pdParms2[k]);
+ }
+ }
+ }
+// std::cout< adxmax;
+ int nParms;
+ fD = bmdu = target = xmax = xlk2 = xlk3 = crisk = 0.0;
+ optite = -5;
+ nCall = 1;
+
+ temprisk = 1; //based on bmdparm.risk set to zero in original Multistage_ComboBMD
+
+
+ /* Get the degree of polynomial */
+ adxmax.resize(pyRes->ndatasets, 0.0);
+
+ CBnparm = lnParmMax = 0;
+ for(i = 0; i < pyRes->ndatasets; i++)
+ {
+ int selModelIndex = pyRes->selectedModelIndex[i];
+ struct python_dichotomous_analysis mod = pyAnal->models[i][selModelIndex];
+ struct python_dichotomous_model_result modRes = pyRes->models[i][selModelIndex];
+ xmax = mod.doses[0];
+ CBnparm = CBnparm + modRes.nparms;
+
+ if(modRes.nparms > lnParmMax){
+ lnParmMax = modRes.nparms;
+ }
+
+// std::cout<<"\n\nIn BMDU_combofunc, Tumor "< xmax){
+ xmax = mod.doses[j];
+ }
+// std::cout<ndatasets; i++)
+ {
+ if(adxmax[i] > xmax) xmax = adxmax[i];
+ }
+ scale = xmax;
+
+ nParmMax = (int)lnParmMax;
+ nParms = pyRes->ndatasets*nParmMax;
+ std::vector pdParms(nParms, 0.0);
+ std::vector pdParmsBak(nParms, 0.0);
+ std::vector pdParms2(nParms, 0.0);
+ std::vector pdVals(nParms, 0.0);
+ std::vector piSpec2(nParms, 0.0);
+
+
+// std::cout<<"\nIn BMDU_combofunc, pdParms[j](MLEs)"<ndatasets; i++)
+// {
+// std::cout<<"Tumor "<"<models[i][0].nparms; j++)
+// {
+// std::cout<models[i][0].parms[j]<<"\t";
+// }
+// std::cout<ndatasets-1; i>=0; i--)
+ {
+ k++;
+ if(jmodels[i][0].nparms)
+ {
+ pdParms[k] = pyRes->models[i][0].parms[j];
+ piSpec2[k] = 0.0; //no user specified values
+ }
+ }
+ }
+
+// std::cout<<"\n\nIn BMDU_combofunc, pdParms values (MLEs, k="<ndatasets; j++){
+// std::cout<<" Tumor "<ndatasets; j++){
+// std::cout<ndatasets; i++)
+// {
+// std::cout<<"Tumor "< "<combined_LL<ndatasets-1; i>=0; i--)
+ {
+ int iParms = pyRes->models[i][0].nparms;
+
+ k++;
+ if (j models[i][0].parms[j];
+ pdParms[k] = pyRes->models[i][0].parms[j]*(pow(scale,(j)));
+ } else {
+ pdParmsBak[k] = pdParms[k] = BMDS_MISSING;
+ }
+ }
+ }
+ for(i=0; indatasets; i++){
+ pdParms[i] = -log(1.0 - pdParms[i]);
+ }
+// std::cout<<"\n\nValues BEFORE call "<BMR<<" target="<ndatasets; j++){
+// std::cout<<"Scaled | Unscaled\t\t";
+// }
+// std::cout<ndatasets; j++){
+// std::cout<= 0, it is successful, and we can stop */
+// if(optite >= 0)
+// break;
+//#ifdef MISC_OUT
+// /* otherwise, issues another warning, and continue trying */
+// else
+// fprintf(fp_out, "**** WARNING %d: Completion code = %d trying new start****\n\n", ii, optite);
+//#endif
+// } /* end for */
+//
+// } /* end: if (optite < 0) */
+//
+// if(optite < 0 )
+// {
+//#ifdef MISC_OUT
+// /* Warn user */
+// fprintf(fp_out, "**** WARNING: Completion code = %d. Optimum not found. Trying new starting point****\n\n", optite);
+//#endif
+// /* Try up to 10 times if needed */
+// for(ii = 0; ii < 10; ii++)
+// {
+// /* Get original values */
+// k = -1;
+// for(i = 1; i <= nT; i++)
+// {
+// for(j = 1; j <= nParmMax; j++)
+// {
+// k++;
+// pdParms[k] = aParmList[i].pdParms[j] * (pow(scale,(j-1)));;
+// }
+// }
+// GetMoreParms2(pdParms, nParmMax); /* Get a new starting point */
+//
+// /* again, reparameterize p[0] */
+// for(i = 0; i < nT; i++)
+// {
+// pdParms[i] = -log(1-aParmList[i+1].pdParms[1]);
+// }
+//
+// /* Try again */
+// fprintf(fp_log,"\n\n\nValues BEFORE call %d to getclmt_()", nCall);
+// fprintf(fp_log,"BMR=%10.5g target=%10.5g\n",BMR, target);
+// fprintf(fp_log,"bmdl=%10.5g optite=%d", bmdl, optite);
+// fprintf(fp_log,"\n");
+// for(j = 1; j<=nT; j++)
+// fprintf(fp_log," Tumor %d\t", j);
+// fprintf(fp_log,"\n");
+// i = 0;
+// for(k = 0; k < nParmMax; k++)
+// {
+// for(j = 1; j <= nT; j++)
+// fprintf(fp_log,"%10.5g\t", pdParms[i++]);
+// fprintf(fp_log,"\n");
+// }
+// fflush(fp_log);
+//
+// getclmt_(&which, &lnParmMax, &BMR, &Dose,
+// &target, pdParms, piSpec2,
+// pdParms, &temprisk, &bmdl,
+// pdParms2, &optite, &nresm,
+// bind, is_zero);
+//
+// fprintf(fp_log,"\n\nValues AFTER call %d to getclmt_()", nCall);
+// fprintf(fp_log,"BMR=%10.5g target=%10.5g\n",BMR, target);
+// fprintf(fp_log,"bmdl=%10.5g optite=%d", bmdl, optite);
+// fprintf(fp_log,"\n");
+// for(j = 1; j<=nT; j++)
+// fprintf(fp_log," Tumor %d\t", j);
+// fprintf(fp_log,"\n");
+// i = 0;
+// for(k = 0; k < nParmMax; k++)
+// {
+// for(j = 1; j <= nT; j++)
+// fprintf(fp_log,"%10.5g\t", pdParms[i++]);
+// fprintf(fp_log,"\n");
+// }
+// nCall++;
+// fflush(fp_log);
+//
+// /* if optite >= 0, it is successful, and we can stop */
+// if(optite >= 0)
+// break;
+//#ifdef MISC_OUT
+// /* otherwise, issues another warning, and continue trying */
+// else
+// fprintf(fp_out, "**** WARNING %d: Completion code = %d trying new start****\n\n", ii, optite);
+//#endif
+// } /* end for */
+//
+// } /* end: if (optite < 0) */
+//
+//
+// /* Let user know if no optimum was found */
+// if(ii == 10)
+// {
+//#ifdef MISC_OUT
+// fprintf(fp_out, "\nWarning: completion code still negative");
+//#endif
+// fprintf(fp_out, "\nBMDL did not converge for BMR = %f\n", BMR);
+// bmdl_bmr_flag = 1;
+// } /* end if */
+
+
+// double **ppdParms;
+// ppdParms = DMATRIX (1, nT, 1, nParmMax);
+
+ pdParms2 = pdParms;
+ int nT = 0;
+ for(int i=0; indatasets; i++){
+ nT++;
+ }
+ std::vector> ppdParms(nT, std::vector (nParmMax, BMDS_MISSING));
+// std::cout<<"********** pdParms2 Values **********"<ndatasets; j++){
+// std::cout<<" Tumor " << j << "\t";
+// }
+// std::cout<ndatasets; i++)
+ {
+ k++;
+// std::cout<models[i][0].nparms)
+ {
+ ppdParms[i][j] = pdParms2[k];
+ if(k < pyAnal->ndatasets)
+ {
+ ppdParms[i][j] = 1-exp(-pdParms2[k]);
+ }
+ }
+ }
+// std::cout<> p, python_multitumor_analysis *pyAnal, python_multitumor_result *pyRes){
+
+ int nObs, nT, nParms;
+ double prob, like, dSumParm1, pr, bkg;
+
+ prob = like = dSumParm1 = 0.0;
+
+ for(int n=0; nndatasets; n++){
+ dSumParm1 += p[n][0];
+ nObs = pyAnal->models[n][0].n;
+ int selIndex = pyRes->selectedModelIndex[n];
+ nParms = pyRes->models[n][selIndex].nparms;
+ for (int i=0; imodels[n][selIndex].doses[i];
+ Yp = pyAnal->models[n][selIndex].Y[i];
+ Yn = pyAnal->models[n][selIndex].n_group[i] - Yp;
+ prob = p[n][nParms-1];
+ if(n==0 && flag==1 && nParms==2){
+ prob = p[0][1];
+ for(int nt=1; ntndatasets; nt++){
+ prob -= p[nt][1];
+ }
+ }
+
+ for (int j=nParms-1; j>=0; j--){
+ if (n==0 && flag==1 && j==1){
+ pr = p[0][1];
+ for (int nt=1; ntndatasets; nt++){
+ pr -= p[nt][1];
+ }
+ prob = D*prob + pr;
+ } else {
+ prob = D*prob + p[n][j];
+ }
+ }
+ prob = (1-exp(-1.0* prob));
+ if ((prob==0) || (prob == 1)){
+ if(Yp <=0 || Yn <=0){
+ like += 0;
+ } else {
+ if (prob == 1){
+ like += Yn*(-710);
+ } else {
+ like += Yp*(-710);
+ }
+ }
+ } else {
+ like += Yp*log(prob) + Yn*log(1-prob);
+ }
+ }
+ }
+
+
+ bkg = 1.0 - exp(-1.0*(dSumParm1));
+
+
+ for(int n=0; nndatasets; n++){
+ int selIndex = pyRes->selectedModelIndex[n];
+ nParms = pyRes->models[n][selIndex].nparms;
+
+ prob = p[n][nParms-1];
+ if (n ==0 && flag == 1 && nParms == 2){
+ prob = p[0][1];
+ for(int nt = 1; ntndatasets; nt++){
+ prob -= p[nt][1];
+ }
+ }
+ for (int j=nParms-1; j>=0; j--){
+ if (n==0 && flag == 1 && j==1){
+ pr = p[0][1];
+ for (int nt = 1; ntndatasets; nt++){
+ pr -= p[nt][1];
+ }
+ prob = dose*prob + pr;
+ } else {
+ prob = dose*prob + p[n][j];
+ }
+ }
+
+ }
+
+ if (bkg == 1.0){
+ *crisk = 0.0;
+ } else {
+ *crisk = ((1.0 - exp(-1.0*(prob))) - bkg)/(1.0-bkg);
+ }
+
+ return like;
+}
+
+
+/***************************************************************************
+ * Multistage_ComboBMD -- Used to calculate the BMD and BMDL for combined
+ * Multistage models A and B
+ * external: bmdparm
+ * input: * nparm is the number of parameters
+ * nparm is the number of parameters/
+ * p[] is the vector of fitted parameters
+ * gtol is a small positive number
+ * iter is not used ????????
+ * xlk is the sum of log-likelihood for the fitted models (A + B)
+ * Rlevel[] is the vector of BMR's
+ * Bmdl[] is the vector of BMDL's for the BMR's
+ * Bmdu[] is the vector of BMDU's for the BMR's
+ * BMD is the benchmark dose
+ * output: BMD, Bmdl[], prints BMDL
+ ****************************************************************************/
+void Multistage_ComboBMD (struct python_multitumor_analysis *pyAnal, struct python_multitumor_result *pyRes){
+
+ int cnparm, selIndex, nT, is_zero;
+ double LR, xa, xb, D, fa, fb, Drange, cxmax, ck, poly;
+ double gtol = 1e-12;
+ double tol; //for zeroin function
+ std::vector cp;
+
+
+ nT = pyRes->ndatasets;
+ //find largest nparm and largest dose
+ cnparm = 0;
+ cxmax = 0;
+ for(int i=0; iselectedModelIndex[i];
+ if(pyRes->models[i][selIndex].nparms > cnparm){
+ cnparm = pyRes->models[i][selIndex].nparms;
+ }
+ double tmpMax = *max_element(std::begin(pyAnal->models[i][selIndex].doses), std::end(pyAnal->models[i][selIndex].doses));
+ if(cxmax < tmpMax) cxmax = tmpMax;
+ }
+ //add all model parameters to combined p
+ cp.resize(cnparm, 0.0);
+ for(int i=0; iselectedModelIndex[i];
+ cp[0] = cp[0] - log(1.0 - pyRes->models[i][selIndex].parms[0]);
+ for(int j=1; jmodels[i][selIndex].nparms; j++){
+ cp[j] = cp[j] + pyRes->models[i][selIndex].parms[j];
+ }
+ }
+ //compute chi-squared value
+ double cl = 1.0 - pyAnal->alpha;
+ if (cl<0.5){
+ LR = 0.5*gsl_cdf_chisq_Pinv(1.0-2.0*cl, 1);
+ } else {
+ LR = 0.5*gsl_cdf_chisq_Pinv(2.0*cl-1.0, 1);
+ }
+
+ ck = -log(1-pyAnal->BMR); //Extra risk
+ //solve the BMD
+ xa = D = 0.0;
+ fa = -ck; //note: ck>0.0
+ fb = fa;
+ Drange = cxmax;
+
+ int k=1;
+ while(k<300 && fb<0){
+ fa=fb;
+ xa=D;
+ D=Drange*k/100.0;
+ poly=cp[cnparm-1];
+ for (int j=cnparm-2; j>0; j--){
+ poly=poly*D+cp[j];
+ }
+ poly=poly*D;
+ fb=poly-ck;
+ k++;
+ }
+
+ if (fb<0) std::cout<<"BMD Computation failed. BMD is larger than three times maximum input doses."<BMD = xb;
+
+ is_zero = 0;
+
+ pyRes->BMDL = BMDL_combofunc(pyAnal, pyRes, xb, xa, LR, tol, &is_zero);
+// std::cout<<"pyRes->BMDL="<BMDL<BMDU = BMDU_combofunc(pyAnal, pyRes, xb, xa, LR, tol, &is_zero);
+// std::cout<<"pyRes->BMDU="<BMDU< &x, std::vector &grad, void *my_func_data){
+ //obj function of form F(BMD, beta) = BMD, where BMD=X[0] and beta=X[i] where i>0
+ if (!grad.empty()){
+ //fill all gradients to zero (grad[1] to grad[n] are all zero
+ // because objective function only depends on X[0]
+ std::fill(grad.begin(), grad.end(), 0);
+ //set first grad to 1, since x[1] should be the BMD (Dose)
+ grad[0] = 1.0;
+ }
+ return x[0];
+}
+
+
+double objfunc_bmdu(const std::vector &x, std::vector &grad, void *my_func_data){
+ //obj function of form F(BMD, beta) = BMD, where BMD=X[0] and beta=X[i] where i>0
+ if (!grad.empty()){
+ //fill all gradients to zero (grad[1] to grad[n] are all zero
+ // because objective function only depends on X[0]
+ std::fill(grad.begin(), grad.end(), 0);
+ //set first grad to 1, since x[1] should be the BMD (Dose)
+ grad[0] = -1.0;
+ }
+ return -1*x[0];
+}
+
+double myEqualityConstraint(const std::vector &x, std::vector &grad, void *data){
+
+ msComboEq *d = reinterpret_cast(data);
+ double bmr = d->bmr;
+ int nT = d->nT;
+ std::vector degree = d->degree;
+
+ double D = exp(x[0]);
+ int iIndex = x.size() - 1;
+ double sum2 = 0.0;
+ double sum = 0.0;
+
+ if (!grad.empty()){
+ for (int l=nT-1; l>=0; l--){
+ sum = 0.0;
+ for (int k=degree[l]; k>0; k--){
+ sum = sum*D + k*x[iIndex]*D;
+ iIndex -= 1;
+ }
+ iIndex -= 1;
+ sum2 += sum;
+ }
+ grad[0] = sum2;
+
+ iIndex = 1;
+ for (int k=0; k=0; l--){
+ sum2 = 0.0;
+ for (int k=degree[l]; k>0; k--){
+ sum2 = sum2*D + x[iIndex]*D;
+ iIndex -= 1;
+ }
+ iIndex -= 1;
+ sum3 += sum2;
+ }
+
+ return sum + sum3;
+}
+
+
+double myInequalityConstraint1(const std::vector &x, std::vector &grad, void *data){
+
+ msComboInEq *d = reinterpret_cast(data);
+ double target = d->target;
+ int nT = d->nT;
+ std::vector nObs = d->nObs;
+ std::vector degree = d->degree;
+ std::vector> doses = d->doses;
+ std::vector> Y = d->Y;
+ std::vector> n_group = d->n_group;
+
+ int m = nT;
+ int iOffset = 1;
+ double resid = 0.0;
+ if (!grad.empty()){
+ for (size_t i=0; i=iBottom; j--){
+ sum = sum * doses[m][k] + x[j];
+ }
+ if (sum < 0) sum = 0.0;
+ double P = 1.0 - exp(-1.0*sum);
+ resid = (Y[m][k]*dslog(P) - (n_group[m][k]-Y[m][k])*dslog(1.0-P))*(P-1.0);
+ int iIndex = iTop-1;
+ for(int j=degree[l]-1; j>=0; j--){
+ grad[iIndex] = grad[iIndex] + resid*(pow(doses[m][k],j));
+ iIndex = iIndex-1;
+ }
+ }
+ iOffset = iTop + 1;
+ }
+ }
+
+
+ double sum2 = 0;
+ iOffset = 1;
+ m = nT;
+ for (int l=0; l=iBottom; j--){
+ sum = sum*doses[m][k] + x[j];
+ }
+ if (sum < 0) sum = 0.0;
+ double P = 1.0 - exp(-1*sum);
+ sum2 = sum2 + Y[m][k]*slog(P) + (n_group[m][k]-Y[m][k]) * slog(1.0 - P);
+ }
+ iOffset = iTop + 1;
+ }
+ return target - sum2;
+
+}
+
+
+double slog(double X){
+ double coefs[4] = {6.7165863851209542e50,-2.0154759155362862e+35, 2.0169759155362859e+19,-710};
+ if (X >= 1e-16){
+ return log(X);
+ } else {
+ double v = 0.0;
+ for (int i=0; i<4; i++){
+ v = X * v + coefs[i];
+ }
+ return v;
+ }
+}
+
+double dslog(double P){
+ if (P >= 1e-10){
+ return 1.0/P;
+ } else {
+ return 2.0e10 - 1.0e20 * P;
+ }
+}
+
+
+
+void BMDS_ENTRY_API __stdcall runBMDSDichoAnalysis(struct dichotomous_analysis *anal, struct dichotomous_model_result *res, struct dichotomous_GOF *gof, struct BMDS_results *bmdsRes, struct dicho_AOD *bmdsAOD){
+
+// std::cout<<"degree = "<degree;
+// std::cout<<"degree:"<degree<prior_cols*anal->parms; k++){
+// std::cout<prior[k]<<", ";
+// }
+// std::cout<validResult = false;
+ bmdsRes->slopeFactor = BMDS_MISSING;
+ bmdsRes->BMD = BMDS_MISSING;
+ bmdsRes->BMDL = BMDS_MISSING;
+ bmdsRes->BMDU = BMDS_MISSING;
+ bmdsRes->bounded.resize(anal->parms);
+ fill(bmdsRes->bounded.begin(), bmdsRes->bounded.end(), false);
+
+ estimate_sm_laplace_dicho(anal, res, true);
+
+ struct dichotomous_PGOF_data gofData;
+ gofData.n = anal->n;
+ gofData.Y = anal->Y;
+ gofData.model = anal->model;
+ gofData.model_df = res->model_df;
+ gofData.est_parms = res->parms;
+ gofData.doses = anal->doses;
+ gofData.n_group = anal->n_group;
+ gofData.parms = anal->parms;
+
+ struct dichotomous_PGOF_result gofRes;
+ double* gofExpected = (double*)malloc(anal->n * sizeof(double));
+ double* gofResidual = (double*)malloc(anal->n * sizeof(double));
+ double gofTestStat = BMDS_MISSING;
+ double gofPVal = BMDS_MISSING;
+ double gofDF = BMDS_MISSING;
+ double* ebUpper = (double*)malloc(anal->n * sizeof(double));
+ double* ebLower = (double*)malloc(anal->n * sizeof(double));
+ gofRes.n = anal->n;
+ gofRes.expected = gofExpected;
+ gofRes.residual = gofResidual;
+ gofRes.test_statistic = gofTestStat;
+
+ gofRes.p_value = gofPVal;
+ gofRes.df = gofDF;
+
+ compute_dichotomous_pearson_GOF(&gofData, &gofRes);
+
+ gof->test_statistic = gofRes.test_statistic;
+
+ //these will be updated later with bounded parm info
+ gof->p_value = gofRes.p_value;
+ gof->df = gofRes.df;
+
+
+ gof->n = gofRes.n;
+ for (int i=0; iexpected.push_back(gofRes.expected[i]);
+ gof->residual.push_back(gofRes.residual[i]);
+ }
+
+
+ //do error bar calcs
+ double pHat;
+ double z;
+ double eb1;
+ double eb2;
+ double ebDenom;
+ double gofAlpha = 0.05; //Alpha value for 95% confidence limit
+ z = gsl_cdf_ugaussian_Pinv(1.0 - gofAlpha / 2); //Z score
+ for (int i=0; in; i++){
+ pHat = anal->Y[i]/anal->n_group[i]; //observed probability
+ eb1 = (2 * anal->Y[i] + z*z);
+ eb2 = z*sqrt(z*z - (2 + 1/anal->n_group[i]) + 4*pHat*((anal->n_group[i] - anal->Y[i]) + 1));
+ ebDenom = 2.0 * (anal->n_group[i] + z * z);
+ gof->ebLower.push_back((eb1 - 1 - eb2)/ ebDenom);
+ gof->ebUpper.push_back((eb1 + 1 + eb2)/ ebDenom);
+ }
+
+ //calculate model chi^2 value
+ bmdsRes->chisq = 0.0;
+ for (int i=0; ichisq += gofRes.residual[i]*gofRes.residual[i];
+ }
+
+ //calculate bayesian BIC_equiv
+ Eigen::MatrixXd cov(res->nparms,res->nparms);
+ int row = 0;
+ int col = 0;
+ for(int i=0; inparms*res->nparms; i++){
+ col = i/res->nparms;
+ row = i - col*res->nparms;
+ cov(row,col) = res->cov[i];
+ }
+
+ bmdsRes->BIC_equiv = res->nparms / 2.0 *log(2.0*M_PI) + res->max + 0.5*log(max(0.0, cov.determinant()));
+ bmdsRes->BIC_equiv = -1*bmdsRes->BIC_equiv;
+
+
+ //calculate dichtomous analysis of deviance
+ struct dichotomous_aod aod;
+ double A1 = BMDS_MISSING;
+ int N1 = BMDS_MISSING;
+ double A2 = BMDS_MISSING;
+ int N2 = BMDS_MISSING;
+ aod.A1 = A1;
+ aod.N1 = N1;
+ aod.A2 = A2;
+ aod.N2 = N2;
+
+ calc_dichoAOD(anal, res, bmdsRes, bmdsAOD, &aod);
+
+ rescale_dichoParms(anal->model, res->parms);
+
+ double estParmCount = 0;
+ collect_dicho_bmd_values(anal, res, bmdsRes, estParmCount);
+
+ //incorporate affect of bounded parameters
+ int bounded = 0;
+ for (int i=0; iparms; i++){
+ if (bmdsRes->bounded[i]){
+ bounded++;
+ }
+ }
+
+ bmdsAOD->nFit = anal->parms - bounded; //number of estimated parameter
+ bmdsAOD->dfFit = anal->n - bmdsAOD->nFit; //nObs - nEstParms
+
+ if (bmdsAOD->devFit < 0 || bmdsAOD->dfFit < 0) {
+ bmdsAOD->pvFit = BMDS_MISSING;
+ } else {
+ bmdsAOD->pvFit = 1.0 -gsl_cdf_chisq_P(bmdsAOD->devFit, bmdsAOD->dfFit);
+ }
+
+
+ //update df for frequentist models only
+ if(anal->prior[1] == 0){
+ //frequentist
+ gofRes.df = bmdsAOD->dfFit;
+ }
+ gof->df = gofRes.df;
+
+ if ( gof->df > 0.0){
+ gofRes.p_value = 1.0 - gsl_cdf_chisq_P(bmdsRes->chisq, gof->df);
+ }else{
+ gofRes.p_value = 1.0;
+ }
+
+ //gofRes.p_value =
+ if (gof->df <= 0.0){
+ gof->p_value = BMDS_MISSING;
+ } else {
+ gof->p_value = gofRes.p_value;
+ }
+
+
+ for (int i=0; i< anal->parms; i++){
+ //std err is sqrt of covariance diagonals unless parameter hit a bound, then report NA
+ bmdsRes->stdErr.push_back(BMDS_MISSING);
+ bmdsRes->lowerConf.push_back(BMDS_MISSING);
+ bmdsRes->upperConf.push_back(BMDS_MISSING);
+ }
+
+ calcParmCIs_dicho (res, bmdsRes);
+
+
+ //compare Matt's BMD to BMD from CDF
+ if (abs(bmdsRes->BMD - res->bmd) > BMDS_EPS){
+ std::cout<<"Warning: CDF BMD differs from model BMD" << std::endl;
+ }
+ //Use Matt's BMD by default
+ bmdsRes->BMD = res->bmd;
+
+ clean_dicho_results(res, gof, bmdsRes, bmdsAOD);
+
+ bool goodCDF = false;
+ for (int i=0; idist_numE;i++){
+ if (res->bmd_dist[i] != BMDS_MISSING){
+ goodCDF = true;
+ }
+ }
+
+ if (bmdsRes->BMD != BMDS_MISSING && !std::isinf(bmdsRes->BMD) && std::isfinite(bmdsRes->BMD) && goodCDF) {
+ bmdsRes->validResult = true;
+ }
+
+}
+
+void BMDS_ENTRY_API __stdcall runBMDSDichoMA(struct dichotomousMA_analysis *MA, struct dichotomous_analysis *DA, struct dichotomousMA_result *res, struct BMDSMA_results *bmdsRes){
+
+
+ bmdsRes->BMD_MA = BMDS_MISSING;
+ bmdsRes->BMDL_MA = BMDS_MISSING;
+ bmdsRes->BMDU_MA = BMDS_MISSING;
+
+ estimate_ma_laplace_dicho(MA, DA, res);
+
+
+ collect_dichoMA_bmd_values(MA, res, bmdsRes, DA->alpha);
+ for(int i=0; inmodels; i++){
+ rescale_dichoParms(MA->models[i], res->models[i]->parms);
+ }
+
+ //calc error bars for graphs
+ double pHat;
+ double z;
+ double eb1;
+ double eb2;
+ double ebDenom;
+ double gofAlpha = 0.05; //Alpha value for 95% confidence limit
+ z = gsl_cdf_ugaussian_Pinv(1.0 - gofAlpha / 2); //Z score
+ for (int i=0; in; i++){
+ pHat = DA->Y[i]/DA->n_group[i]; //observed probability
+ eb1 = (2 * DA->Y[i] + z*z);
+ eb2 = z*sqrt(z*z - (2 + 1/DA->n_group[i]) + 4*pHat*((DA->n_group[i] - DA->Y[i]) + 1));
+ ebDenom = 2.0 * (DA->n_group[i] + z * z);
+ bmdsRes->ebLower[i] = (eb1 - 1 - eb2)/ ebDenom;
+ bmdsRes->ebUpper[i] = (eb1 + 1 + eb2)/ ebDenom;
+ }
+
+ clean_dicho_MA_results(res, bmdsRes);
+
+}
+
+//transform parameters or priors which were computed using a logistic dist.
+void rescale_dichoParms(int model, double *parms){
+ //rescale background parameters for all models and v parameter for DHill model
+ switch(model){
+ case dich_model::d_multistage:
+ case dich_model::d_weibull:
+ case dich_model::d_gamma:
+ case dich_model::d_loglogistic:
+ case dich_model::d_qlinear:
+ case dich_model::d_logprobit:
+ //rescale background parameter
+ parms[0] = 1.0/(1.0+exp(-1.0*parms[0]));
+ break;
+ case dich_model::d_hill:
+ //rescale background and v parameter
+ parms[0] = 1.0/(1.0+exp(-1.0*parms[0]));
+ parms[1] = 1.0/(1.0+exp(-1.0*parms[1]));
+ break;
+ default:
+ break;
+ }
+
+}
+
+void calcParmCIs_dicho (struct dichotomous_model_result *res, struct BMDS_results *bmdsRes) {
+
+ double *adj = new double[res->nparms]; //adjustments for transformed parameters
+ int diagInd = 0; //1d index of 2d diagonal location
+ double tmp = 0; //used for tmp calculations
+
+
+
+ for (int i=0; inparms; i++){
+ diagInd = i*(1+res->nparms); //gives index of diagonal for 1d flattened array
+ adj[i] = 1.0;
+ if (!bmdsRes->bounded[i]){
+ bmdsRes->stdErr[i] = sqrt(res->cov[diagInd]);
+ }
+ }
+
+ //now check if parameters were transformed and modify adj values accordingly
+ //if transformed, then multiply adj * derivative of transform (at parm value)
+ switch(res->model){
+ //transform parameters which were computed using a logistic dist.
+ //void rescale_dichoParms(int model, struct dichotomous_model_result *res){
+ case dich_model::d_multistage:
+ case dich_model::d_weibull:
+ case dich_model::d_gamma:
+ case dich_model::d_loglogistic:
+ case dich_model::d_qlinear:
+ case dich_model::d_logprobit:
+ //background parameter
+ tmp = exp(-1*res->parms[0]);
+ tmp = tmp/((1.0+tmp)*(1.0+tmp));
+ adj[0] = tmp*tmp;
+ break;
+ case dich_model::d_hill:
+ //background and v parameter
+ tmp = exp(-1*res->parms[0]);
+ tmp = tmp/((1.0+tmp)*(1.0+tmp));
+ adj[0] = tmp*tmp;
+ tmp = exp(-1*res->parms[1]);
+ tmp = tmp/((1.0+tmp)*(1.0+tmp));
+ adj[1] = tmp*tmp;
+ break;
+ default:
+ break;
+ }
+
+ //now calculate upper and lower confidence intervals
+
+ for (int i=0; inparms; i++){
+ if (bmdsRes->bounded[i]){
+ bmdsRes->lowerConf[i] = BMDS_MISSING;
+ bmdsRes->upperConf[i] = BMDS_MISSING;
+ } else {
+ bmdsRes->stdErr[i] = bmdsRes->stdErr[i] * adj[i];
+ bmdsRes->lowerConf[i] = res->parms[i] - bmdsRes->stdErr[i]*BMDS_QNORM;
+ bmdsRes->upperConf[i] = res->parms[i] + bmdsRes->stdErr[i]*BMDS_QNORM;
+ }
+ }
+
+ delete [] adj;
+
+}
+
+
+void BMDS_ENTRY_API __stdcall runBMDSContAnalysis(struct continuous_analysis *anal, struct continuous_model_result *res, struct BMDS_results *bmdsRes, struct continuous_AOD *aod, struct continuous_GOF *gof, bool *detectAdvDir, bool *restricted){
+
+ bmdsRes->BMD = BMDS_MISSING;
+ bmdsRes->BMDL = BMDS_MISSING;
+ bmdsRes->BMDU = BMDS_MISSING;
+ bmdsRes->validResult = false;
+ anal->transform_dose = false;
+ //if (anal->model == cont_model::polynomial && anal->disttype == distribution::log_normal){
+ if (anal->model != cont_model::exp_3 && anal->model != cont_model::exp_5){
+ if(anal->disttype == distribution::log_normal){
+ return;
+ }
+ }
+ if (*detectAdvDir){
+ determineAdvDir(anal);
+ int ind;
+ if (*restricted) {
+ switch(anal->model){
+ case cont_model::exp_3:
+ case cont_model::exp_5:
+ if (anal->prior[0] == 0){ //checks if frequentist model
+ if(anal->isIncreasing){
+ if (anal->disttype == distribution::normal_ncv){
+ anal->prior[20] = 0.0; //c min
+ } else {
+ anal->prior[17] = 0.0; //c min
+ }
+ } else {
+ if (anal->disttype == distribution::normal_ncv){
+ anal->prior[26] = 0.0; //c max
+ } else {
+ anal->prior[22] = 0.0; //c max
+ }
+ }
+ }
+ break;
+ case cont_model::polynomial:
+ if(anal->prior[0] == 0){ //checks if frequentist model
+ int numRows = 2+anal->degree;
+ int ind; //index in prior array
+ if (anal->disttype == distribution::normal_ncv){
+ numRows++;
+ }
+ //set ind to target min or max for 1st beta parameter then adjust as needed
+ //min for increasing, max for decreasing
+ if(anal->isIncreasing){
+ ind = 3*numRows+1;
+ } else {
+ ind = 4*numRows+1;
+ }
+ //beta terms
+ for (int i=ind; idegree; i++){
+ anal->prior[i] = 0.0; // beta min or max
+ }
+ }
+ break;
+ } //end switch
+ } //end if restricted
+ } //end if detectAdvDir
+
+ //need to handle issue with decreasing response datasets for relative deviation
+ //easiest workaround is to modify BMRF before sending to estimate_sm_laplace
+ if (anal->BMD_type == 3) { //rel dev
+ if(!anal->isIncreasing){
+ anal->BMR = 1.0 - anal->BMR;
+ }
+ }
+
+ estimate_sm_laplace_cont(anal, res);
+
+ //if not suff_stat, then convert
+ struct continuous_analysis GOFanal;
+ //arrays are needed for conversion to suff_stat
+ double* doses = (double*)malloc(anal->n*sizeof(double));
+ double* means = (double*)malloc(anal->n * sizeof(double));
+ double* n_group = (double*)malloc(anal->n * sizeof(double));
+ double* sd = (double*)malloc(anal->n * sizeof(double));
+ for (int i=0; in; i++){
+ means[i] = anal->Y[i];
+ doses[i] = anal->doses[i];
+ }
+ bool isIncreasing = true;
+ double BMR = BMDS_MISSING;
+ double tail_prob = BMDS_MISSING;
+ int disttype = BMDS_MISSING;
+ double alpha = BMDS_MISSING;
+ int samples = BMDS_MISSING;
+ int degree = BMDS_MISSING;
+ int burnin = BMDS_MISSING;
+ int parms = BMDS_MISSING;
+ int prior_cols = BMDS_MISSING;
+
+ if (anal->suff_stat){
+ GOFanal = *anal;
+ } else {
+ //copy analysis and convert to suff_stat
+ GOFanal.n = anal->n;
+ GOFanal.doses = doses;
+ GOFanal.Y = means;
+ GOFanal.n_group = n_group;
+ GOFanal.sd = sd;
+ GOFanal.isIncreasing = isIncreasing;
+ GOFanal.BMR = BMR;
+ GOFanal.tail_prob = tail_prob;
+ GOFanal.disttype = disttype;
+ GOFanal.alpha = alpha;
+ GOFanal.samples = samples;
+ GOFanal.degree = degree;
+ GOFanal.burnin = burnin;
+ GOFanal.parms = parms;
+ GOFanal.prior_cols = prior_cols;
+ GOFanal.disttype = distribution::normal; //needed for all distrubutions to avoid error in ln conversion to suff_stats
+ bmdsConvertSStat(anal, &GOFanal, true);
+ }
+
+
+ continuous_expected_result GOFres;
+ GOFres.n = GOFanal.n;
+ GOFres.expected = new double[GOFanal.n];
+ GOFres.sd = new double[GOFanal.n];
+
+ continuous_expectation(&GOFanal, res, &GOFres);
+
+ for (int i=0; idose.push_back(GOFanal.doses[i]);
+ gof->size.push_back(GOFanal.n_group[i]);
+ gof->estMean.push_back(GOFres.expected[i]);
+ gof->obsMean.push_back(GOFanal.Y[i]);
+ gof->estSD.push_back(GOFres.sd[i]);
+ gof->obsSD.push_back(GOFanal.sd[i]);
+ gof->res.push_back(sqrt(gof->size[i])*(gof->obsMean[i] - gof->estMean[i]) / gof->estSD[i]);
+// gof->n = GOFanal.n;
+ }
+ gof->n = GOFanal.n;
+ if (anal->disttype == distribution::log_normal){
+ for (int i=0; icalcMean.push_back(exp(log(GOFanal.Y[i]) - log(1 + pow(GOFanal.sd[i] / GOFanal.Y[i], 2.0)) / 2));
+ gof->calcSD.push_back(exp(sqrt(log(1.0 + pow(GOFanal.sd[i]/GOFanal.Y[i], 2.0)))));
+ }
+ for (int i=0; iestMean.push_back(exp(gof->estMean[i]+pow(exp(res->parms[res->nparms-1]),2)/2));
+ gof->res.push_back(sqrt(gof->size[i])*(gof->obsMean[i] - gof->estMean[i]) / gof->estSD[i]);
+ }
+ } else {
+ for (int i=0; icalcMean.push_back(GOFanal.Y[i]);
+ gof->calcSD.push_back(GOFanal.sd[i]);
+ }
+ }
+
+ double ebUpper, ebLower;
+ for (int i=0; icalcMean[i] + gsl_cdf_tdist_Pinv(0.025, gof->n - 1) * (gof->obsSD[i]/sqrt(gof->size[i]));
+ ebUpper = gof->calcMean[i] + gsl_cdf_tdist_Pinv(0.975, gof->n - 1) * (gof->obsSD[i]/sqrt(gof->size[i]));
+ gof->ebLower.push_back(ebLower);
+ gof->ebUpper.push_back(ebUpper);
+ }
+ //calculate bayesian BIC_equiv
+ Eigen::MatrixXd cov(res->nparms,res->nparms);
+ int row = 0;
+ int col = 0;
+ for(int i=0; inparms*res->nparms; i++){
+ col = i/res->nparms;
+ row = i - col*res->nparms;
+ cov(row,col) = res->cov[i];
+ }
+
+ bmdsRes->BIC_equiv = res->nparms / 2.0 *log(2.0*M_PI) + res->max + 0.5*log(max(0.0, cov.determinant()));
+ bmdsRes->BIC_equiv = -1*bmdsRes->BIC_equiv;
+ collect_cont_bmd_values(anal, res, bmdsRes);
+
+ aod->LL.resize(5);
+ aod->nParms.resize(5);
+ aod->AIC.resize(5);
+ aod->TOI.llRatio.resize(4);
+ aod->TOI.DF.resize(4);
+ aod->TOI.pVal.resize(4);
+ calc_contAOD(anal, &GOFanal, res, bmdsRes, aod);
+
+ rescale_contParms(anal, res->parms);
+
+ for (int i=0; i< anal->parms; i++){
+ bmdsRes->stdErr.push_back(BMDS_MISSING);
+ bmdsRes->lowerConf.push_back(BMDS_MISSING);
+ bmdsRes->upperConf.push_back(BMDS_MISSING);
+ }
+
+ calcParmCIs_cont(res, bmdsRes);
+
+ //compare Matt's BMD to BMD from CDF
+ if (abs(bmdsRes->BMD - res->bmd) > BMDS_EPS){
+ std::cout<<"Warning: CDF BMD differs from model BMD" << std::endl;
+ }
+ //Use Matt's BMD by default
+ bmdsRes->BMD = res->bmd;
+
+ clean_cont_results(res, bmdsRes, aod, gof);
+
+ bool goodCDF = false;
+ for (int i=0; idist_numE;i++){
+ if (res->bmd_dist[i] != BMDS_MISSING){
+ goodCDF = true;
+ }
+ }
+
+ if (bmdsRes->BMD != BMDS_MISSING && !std::isinf(bmdsRes->BMD) && std::isfinite(bmdsRes->BMD) && goodCDF) {
+ bmdsRes->validResult = true;
+ }
+ // //compute confidence number
+ // std::cout<<"cov: " << cov << std::endl;
+ // Eigen::VectorXcd eivals = cov.eigenvalues();
+ // std::cout << "Eigenvalues are: " << std::endl << eivals << std::endl;
+ // Eigen::VectorXd eVals = eivals.real();
+ // double min = *std::min_element(std::begin(eVals.array()), std::end(eVals.array()));
+ // double max = *std::max_element(std::begin(eVals.array()), std::end(eVals.array()));
+ // std::cout << "Min: " << min << std::endl;
+ // std::cout << "Max: " << max << std::endl;
+ // std::cout << "Condition number: " << max/min << std::endl;
+ delete [] GOFres.expected;
+ delete [] GOFres.sd;
+}
+
+
+void rescale_contParms(struct continuous_analysis *CA, double *parms){
+ //rescale alpha parameter for all continuous models
+ //assumes alpha parameter is always last
+ switch (CA->model){
+ case cont_model::hill:
+ case cont_model::power:
+ case cont_model::funl:
+ case cont_model::polynomial:
+ //rescale alpha
+ parms[CA->parms-1] = exp(parms[CA->parms-1]);
+ break;
+// case cont_model::exp_3:
+// //rescale g for log_normal
+// if (CA->disttype == distribution::log_normal){
+// parms[0] = parms[0]*exp(pow(exp(parms[CA->parms-2]),2)/2);
+// }
+// break;
+ case cont_model::exp_5:
+// //rescale g for log_normal
+// if (CA->disttype == distribution::log_normal){
+// parms[0] = parms[0]*exp(pow(exp(parms[CA->parms-2]),2)/2);
+// }
+ //rescale c
+ parms[2] = exp(parms[2]);
+ break;
+ }
+}
+
+void calcParmCIs_cont(struct continuous_model_result *res, struct BMDS_results *bmdsRes){
+
+ double *adj = new double[res->nparms]; //adjustments for transformed parameters
+ int diagInd = 0; //1d index of 2d diagonal location
+ double tmp = 0; //used for tmp calculations
+
+ for (int i=0; inparms; i++){
+ diagInd = i*(1+res->nparms); //gives index of diagonal for 1d flattened array
+ adj[i] = 1.0;
+ if (!bmdsRes->bounded[i]){
+ bmdsRes->stdErr[i] = sqrt(res->cov[diagInd]);
+ }
+ }
+
+ //now check if parameters were transformed and modify adj values accordingly
+ //if transformed, then multiply adj * derivative of transform (at parm value)
+ switch(res->model){
+ case cont_model::hill:
+ case cont_model::power:
+ case cont_model::funl:
+ case cont_model::polynomial:
+ //rescale alpha //at this point alpha has already been rescaled, so no need to use exp(parm)
+ //since actual parm is ln(alpha), then gprime(theta) = exp(theta) = exp(ln(alpha)) = alpha
+ //tmp = exp(res->parms[res->nparms-1]);
+ tmp = res->parms[res->nparms-1];
+ adj[res->nparms-1] = adj[res->nparms-1]*tmp*tmp;
+ break;
+ case cont_model::exp_5:
+ //rescale c
+ //tmp = exp(res->parms[2]);
+ //adj[2] = adj[2]*tmp*tmp;
+ break;
+ default:
+ break;
+ }
+
+ //now calculate upper and lower confidence intervals
+ for (int i=0; inparms; i++){
+ if (bmdsRes->bounded[i]){
+ bmdsRes->lowerConf[i] = BMDS_MISSING;
+ bmdsRes->upperConf[i] = BMDS_MISSING;
+ } else {
+ bmdsRes->stdErr[i] = bmdsRes->stdErr[i] * adj[i];
+ bmdsRes->lowerConf[i] = res->parms[i] - bmdsRes->stdErr[i]*BMDS_QNORM;
+ bmdsRes->upperConf[i] = res->parms[i] + bmdsRes->stdErr[i]*BMDS_QNORM;
+ }
+ }
+
+}
+
+
+void calc_dichoAOD(struct dichotomous_analysis *DA, struct dichotomous_model_result *res, struct BMDS_results *bmdsRes, struct dicho_AOD *bmdsAOD, struct dichotomous_aod *aod){
+
+ deviance_dichotomous(DA, aod);
+
+ bmdsAOD->fullLL = -1*aod->A1;
+ bmdsAOD->nFull = aod->N1;
+ bmdsAOD->redLL = -1*aod->A2;
+ bmdsAOD->nRed = aod->N2;
+ bmdsAOD->fittedLL = -1*res->max;
+ int bounded = 0;
+ for(int i=0; iparms;i++){
+ if(bmdsRes->bounded[i]){
+ bounded++;
+ }
+ }
+
+ bmdsAOD->devFit = 2*(bmdsAOD->fullLL - bmdsAOD->fittedLL);
+
+ //these fitted model values will be calculated later after bounded parms have been determined
+ bmdsAOD->nFit = BMDS_MISSING;
+ bmdsAOD->dfFit = BMDS_MISSING;
+ bmdsAOD->pvFit = BMDS_MISSING;
+
+ double dev;
+ double df;
+
+ bmdsAOD->devRed = dev = 2* (bmdsAOD->fullLL - bmdsAOD->redLL);
+ bmdsAOD->dfRed = df = DA->n-1;
+ if (dev < 0 || df < 0) {
+ bmdsAOD->pvRed = BMDS_MISSING;
+ } else {
+ bmdsAOD->pvRed = 1.0 - gsl_cdf_chisq_P(dev, df);
+ }
+
+}
+
+void calc_contAOD(struct continuous_analysis *CA, struct continuous_analysis *GOFanal, struct continuous_model_result *res, struct BMDS_results *bmdsRes, struct continuous_AOD *aod){
+ continuous_deviance CD;
+
+ if (CA->disttype == distribution::log_normal){
+ estimate_log_normal_aod(CA, &CD);
+ } else {
+ estimate_normal_aod(CA, &CD);
+ }
+ //fill aod with results and calculate tests of interest
+ aod->LL[0] = -1*CD.A1;
+ aod->nParms[0] = CD.N1;
+ aod->LL[1] = -1*CD.A2;
+ aod->nParms[1] = CD.N2;
+ //A3 is strictly NCV test. If CV use results from A1
+ if (CA->disttype == distribution::normal_ncv) {
+ aod->LL[2] = -1*CD.A3;
+ aod->nParms[2] = CD.N3;
+ } else {
+ aod->LL[2] = -1*CD.A1;
+ aod->nParms[2] = CD.N1;
+ }
+ aod->LL[3] = -1*res->max;
+ int bounded = 0;
+ for(int i=0; iparms;i++){
+ if(bmdsRes->bounded[i]){
+ bounded++;
+ }
+ }
+ //aod->nParms[3] = CA->parms - bounded;
+ aod->nParms[3] = res->nparms - bounded;
+ aod->LL[4] = -1*CD.R;
+ aod->nParms[4] = CD.NR;
+
+ //add tests of interest
+ //TODO: need to check for bounded parms in A1,A2,A3,R
+ double sumN = 0;
+ for (int i=0; i<5; i++){
+ aod->AIC[i] = 2*(-1*aod->LL[i] + aod->nParms[i]);
+ }
+
+ if (CA->suff_stat) {
+ for(int i=0; in; i++){
+ sumN+=CA->n_group[i];
+ }
+ } else {
+ sumN = CA->n;
+ }
+ aod->addConst = -1*sumN*log(2*M_PI)/2;
+ if (CA->disttype == distribution::log_normal) {
+ double tmp = 0;
+ if (CA->suff_stat){
+ for (int i=0; in;i++){
+ tmp += (log(CA->Y[i])-log(1+pow((CA->sd[i]/CA->Y[i]),2))/2)*CA->n_group[i];
+ }
+ //aod->addConst -= tmp;
+ } else {
+ //TODO
+ GOFanal->disttype = distribution::log_normal; //previous GOFanal is always normal distribution
+ bmdsConvertSStat(CA, GOFanal, false); //recalculate using with lognormal transformation
+ double divisorTerm = GOFanal->Y[0];
+ //rescale to match BMDS 3.x
+ for (int i=0; in; i++){
+ GOFanal->Y[i] /= divisorTerm;
+ GOFanal->sd[i] /= divisorTerm;
+ }
+ //convert from logscale
+ double tmpMean, tmpSD;
+ for (int i=0; in; i++){
+ tmpMean = GOFanal->Y[i];
+ tmpSD = GOFanal->sd[i];
+ GOFanal->Y[i] = exp(tmpMean+pow(tmpSD,2)/2);
+ GOFanal->sd[i] = GOFanal->Y[i]*sqrt(exp(pow(tmpSD,2))-1);
+
+ //reverse response scaling
+ GOFanal->Y[i] *= divisorTerm;
+ GOFanal->sd[i] *= divisorTerm;
+
+ //convert to log scale
+ tmpMean = GOFanal->Y[i];
+ tmpSD = GOFanal->sd[i];
+ GOFanal->Y[i] = log(tmpMean) - log(1+pow(tmpSD/tmpMean,2))/2;
+ GOFanal->sd[i] = sqrt(log(1+pow(tmpSD/tmpMean,2)));
+
+ tmp += GOFanal->Y[i] * GOFanal->n_group[i];
+ }
+
+ //aod->addConst -= tmp;
+
+ }
+ aod->addConst -= tmp;
+ }
+
+ calcTestsOfInterest(aod);
+}
+
+
+
+void calcTestsOfInterest(struct continuous_AOD *aod){
+
+ double dev;
+ int df;
+
+ //Test #1 - A2 vs Reduced - does mean and/or variance differ across dose groups
+ //TOI->llRatio[0] = dev = 2 * (aod->LL[1] - aod->LL[4]);
+ dev = (aod->LL[1] - aod->LL[4]);
+ //handle underflow/negative zero
+ if (dev < BMDS_EPS){
+ dev = 0.0;
+ }
+ aod->TOI.llRatio[0] = dev = 2*dev;
+
+ aod->TOI.DF[0] = df = aod->nParms[1] - aod->nParms[4];
+ aod->TOI.pVal[0] = (std::isnan(dev) || dev < 0.0 || df < 0.0) ? -1 : 1.0 - gsl_cdf_chisq_P(dev, df);
+
+ //Test #2 - A1 vs A2 - homogeneity of variance across dose groups
+ dev = (aod->LL[1] - aod->LL[0]);
+ //handle underflow/negative zero
+ if (dev < BMDS_EPS){
+ dev = 0.0;
+ }
+ aod->TOI.llRatio[1] = dev = 2*dev;
+ //TOI->llRatio[1] = dev = 2 * (aod->LL[1] - aod->LL[0]);
+ aod->TOI.DF[1] = df = aod->nParms[1] - aod->nParms[0];
+ aod->TOI.pVal[1] = (std::isnan(dev) || dev < 0.0 || df < 0.0) ? -1 : 1.0 - gsl_cdf_chisq_P(dev, df);
+
+ //Test #3 - A2 vs A3 - Does the model describe variances adequately
+ dev = (aod->LL[1] - aod->LL[2]);
+ //handle underflow/negative zero
+ if (dev < BMDS_EPS){
+ dev = 0.0;
+ }
+ //TOI->llRatio[2] = dev = 2 * (aod->LL[1] - aod->LL[2]);
+ aod->TOI.llRatio[2] = dev = 2*dev;
+ aod->TOI.DF[2] = df = aod->nParms[1] - aod->nParms[2];
+ aod->TOI.pVal[2] = (std::isnan(dev) || dev < 0.0 || df < 0.0) ? -1 : 1.0 - gsl_cdf_chisq_P(dev, df);
+
+ //Test #4 - A3 vs Fitted - does the fitted model describe the obs data adequately
+ dev = (aod->LL[2] - aod->LL[3]);
+ //handle underflow/negative zero
+ if (dev < BMDS_EPS){
+ dev = 0.0;
+ }
+ //TOI->llRatio[3] = dev = 2 * (aod->LL[2] - aod->LL[3]);
+ aod->TOI.llRatio[3] = dev = 2*dev;
+ aod->TOI.DF[3] = df = aod->nParms[2] - aod->nParms[3];
+ aod->TOI.pVal[3] = (std::isnan(dev) || dev < 0.0 || df < 0.0) ? -1 : 1.0 - gsl_cdf_chisq_P(dev, df);
+
+ //DOF check for test 4
+ if (aod->TOI.DF[3] <= 0) {
+ aod->TOI.pVal[3] = BMDS_MISSING;
+ }
+}
+
+void determineAdvDir(struct continuous_analysis *CA){
+ int n_rows;
+
+ struct continuous_analysis CAnew;
+ //allocate memory for individual size. Will not use entire array for summary data.
+ double* tmpD = (double*)malloc(CA->n * sizeof(double));
+ double* tmpY = (double*)malloc(CA->n * sizeof(double));
+ double* tmpN = (double*)malloc(CA->n * sizeof(double));
+ double* tmpSD = (double*)malloc(CA->n * sizeof(double));
+
+ CAnew.doses = tmpD;
+ CAnew.Y = tmpY;
+ CAnew.n_group = tmpN;
+ CAnew.sd = tmpSD;
+
+ if(!CA->suff_stat){
+ bmdsConvertSStat(CA, &CAnew,true);
+ n_rows = CAnew.n;
+ } else {
+ //already suff stat
+ n_rows = CA->n;
+ }
+ Eigen::MatrixXd X(n_rows,2);
+ Eigen::MatrixXd W(n_rows,n_rows);
+ Eigen::VectorXd Y(n_rows);
+ Eigen::VectorXd beta(2);
+
+ if(!CA->suff_stat){
+ for (int i=0; i< n_rows; i++){
+ X(i,0) = 1;
+ X(i,1) = CAnew.doses[i];
+
+ W(i,i) = CAnew.n_group[i];
+ Y(i) = CAnew.Y[i];
+ }
+
+ } else {
+ for (int i=0; i< n_rows; i++){
+ X(i,0) = 1;
+ X(i,1) = CA->doses[i];
+
+ W(i,i) = CA->n_group[i];
+ Y(i) = CA->Y[i];
+ }
+ }
+
+
+ beta = (X.transpose() * W * X).colPivHouseholderQr().solve(X.transpose()*W*Y);
+
+ if (beta(1) > 0 ) {
+ CA->isIncreasing = true;
+ } else {
+ CA->isIncreasing = false;
+ }
+
+ free(tmpD);
+ free(tmpY);
+ free(tmpN);
+ free(tmpSD);
+}
+
+
+
+void bmdsConvertSStat(struct continuous_analysis *CA, struct continuous_analysis *CAss, bool clean){
+ //standardize the data
+ int n_rows = CA->n;
+ int n_cols = CA->suff_stat?3:1;
+
+ if(!CA->suff_stat){
+ Eigen::MatrixXd Yin(n_rows,n_cols);
+ Eigen::MatrixXd Xin(n_rows,1);
+ Eigen::MatrixXd SSTAT, SSTAT_LN, X, Y;
+ // copy the original data
+ for (int i = 0; i < n_rows; i++){
+ Yin(i,0) = CA->Y[i];
+ Xin(i,0) = CA->doses[i];
+ }
+ bool canConvert = convertSStat(Yin, Xin, &SSTAT, &SSTAT_LN, &X);
+ bool isLognormal = CAss->disttype == distribution::log_normal;
+ if (canConvert){
+
+ //bool isLognormal = CA->disttype == distribution::log_normal;
+ if (clean){
+ if (isLognormal) {
+ //Y = cleanSuffStat(SSTAT,X,false);
+ Y = cleanSuffStat(SSTAT_LN,X,true,false);
+ } else {
+// //Y = cleanSuffStat(SSTAT_LN,X,true);
+ Y = cleanSuffStat(SSTAT,X,false,false);
+ }
+ } else {
+ if (isLognormal) {
+ Y = SSTAT_LN;
+ } else {
+ Y = SSTAT;
+ }
+ }
+ n_rows = X.rows();
+
+ for(int i=0; idoses[i] = X(i);
+ CAss->Y[i] = Y.col(0)[i];
+ CAss->n_group[i] = Y.col(1)[i];
+ CAss->sd[i] = Y.col(2)[i];
+ }
+ CAss->n = n_rows;
+ }
+ } else {
+ //already suff_stat, so just copy data to arrays
+ for (int i=0; idoses[i] = CA->doses[i];
+ CAss->Y[i] = CA->Y[i];
+ CAss->n_group[i] = CA->n_group[i];
+ CAss->sd[i] = CA->sd[i];
+ CAss->n = CA->n;
+ }
+
+ }
+ CAss->model = CA->model;
+ CAss->isIncreasing = CA->isIncreasing;
+ CAss->BMR = CA->BMR;
+ CAss->tail_prob = CA->tail_prob;
+ CAss->disttype = CA->disttype;
+ CAss->alpha = CA->alpha;
+ CAss->samples = CA->samples;
+ CAss->degree = CA->degree;
+ CAss->burnin = CA->burnin;
+ CAss->parms = CA->parms;
+ CAss->prior_cols = CA->prior_cols;
+
+}
+
+
+//replace any double values containing NaN or infinity with BMDS_MISSING
+void clean_dicho_results(struct dichotomous_model_result *res, struct dichotomous_GOF *gof, struct BMDS_results *bmdsRes, struct dicho_AOD *aod) {
+
+ //dichotomous_model_result
+ for (int i=0; inparms; i++){
+ cleanDouble(&res->parms[i]);
+ }
+
+ for (int i=0; inparms*res->nparms; i++){
+ cleanDouble(&res->cov[i]);
+ }
+
+ cleanDouble(&res->max);
+ cleanDouble(&res->model_df);
+ cleanDouble(&res->total_df);
+ cleanDouble(&res->bmd);
+
+ for (int i=0; idist_numE*2; i++){
+ cleanDouble(&res->bmd_dist[i]);
+ }
+
+ //dichotomous_GOF
+ for (int i=0; in; i++){
+ cleanDouble(&gof->expected[i]);
+// cleanDouble(&gof->residual[i]);
+ cleanDouble(&gof->residual[i]);
+ cleanDouble(&gof->ebLower[i]);
+ cleanDouble(&gof->ebUpper[i]);
+ }
+ cleanDouble(&gof->test_statistic);
+ cleanDouble(&gof->p_value);
+ cleanDouble(&gof->df);
+
+ //BMDS_results
+ cleanDouble(&bmdsRes->BMD);
+ cleanDouble(&bmdsRes->BMDL);
+ cleanDouble(&bmdsRes->BMDU);
+ cleanDouble(&bmdsRes->AIC);
+ cleanDouble(&bmdsRes->BIC_equiv);
+ cleanDouble(&bmdsRes->chisq);
+
+ for (int i=0; inparms; i++){
+ cleanDouble(&bmdsRes->stdErr[i]);
+ cleanDouble(&bmdsRes->lowerConf[i]);
+ cleanDouble(&bmdsRes->upperConf[i]);
+ }
+
+ //dicho_AOD
+ cleanDouble(&aod->fullLL);
+ cleanDouble(&aod->redLL);
+ cleanDouble(&aod->fittedLL);
+ cleanDouble(&aod->devFit);
+ cleanDouble(&aod->devRed);
+ cleanDouble(&aod->pvFit);
+ cleanDouble(&aod->pvRed);
+
+}
+
+void clean_cont_results(struct continuous_model_result *res, struct BMDS_results *bmdsRes, struct continuous_AOD *aod, struct continuous_GOF *gof){
+
+ //continuous_model_result
+ for (int i=0; inparms; i++){
+ cleanDouble(&res->parms[i]);
+ }
+ for (int i=0; inparms*res->nparms; i++){
+ cleanDouble(&res->cov[i]);
+ }
+
+ cleanDouble(&res->max);
+ cleanDouble(&res->model_df);
+ cleanDouble(&res->total_df);
+ cleanDouble(&res->bmd);
+ for (int i=0; idist_numE*2; i++){
+ cleanDouble(&res->bmd_dist[i]);
+ }
+
+ //BMDS_results
+ cleanDouble(&bmdsRes->BMD);
+ cleanDouble(&bmdsRes->BMDL);
+ cleanDouble(&bmdsRes->BMDU);
+ cleanDouble(&bmdsRes->AIC);
+ cleanDouble(&bmdsRes->BIC_equiv);
+ cleanDouble(&bmdsRes->chisq);
+ for (int i=0; inparms; i++){
+ cleanDouble(&bmdsRes->stdErr[i]);
+ cleanDouble(&bmdsRes->lowerConf[i]);
+ cleanDouble(&bmdsRes->upperConf[i]);
+ }
+
+ //continuous_AOD and testsOfInterest
+ for (int i=0; i<5; i++){
+ cleanDouble(&aod->LL[i]);
+ cleanDouble(&aod->AIC[i]);
+ for (int j=0; j<4; j++){
+ cleanDouble(&aod->TOI.llRatio[j]);
+ cleanDouble(&aod->TOI.DF[j]);
+ cleanDouble(&aod->TOI.pVal[j]);
+ }
+ }
+ cleanDouble(&aod->addConst);
+
+ //continuous_GOF
+ for (int i=0; in; i++){
+ cleanDouble(&gof->dose[i]);
+ cleanDouble(&gof->size[i]);
+ cleanDouble(&gof->estMean[i]);
+ cleanDouble(&gof->calcMean[i]);
+ cleanDouble(&gof->obsMean[i]);
+ cleanDouble(&gof->estSD[i]);
+ cleanDouble(&gof->calcSD[i]);
+ cleanDouble(&gof->obsSD[i]);
+ cleanDouble(&gof->res[i]);
+ cleanDouble(&gof->ebLower[i]);
+ cleanDouble(&gof->ebUpper[i]);
+ }
+
+}
+
+void clean_dicho_MA_results(struct dichotomousMA_result *res, struct BMDSMA_results *bmdsRes) {
+
+ //dichotomousMA_result
+ for (int i=0; inmodels; i++){
+ cleanDouble(&res->post_probs[i]);
+ for (int j=0; jmodels[i]->nparms; j++){
+ cleanDouble(&res->models[i]->parms[j]);
+ }
+ for (int j=0; jmodels[i]->nparms*res->models[i]->nparms; j++){
+ cleanDouble(&res->models[i]->cov[j]);
+ }
+ cleanDouble(&res->models[i]->max);
+ cleanDouble(&res->models[i]->model_df);
+ cleanDouble(&res->models[i]->total_df);
+ cleanDouble(&res->models[i]->bmd);
+ for (int j=0; jmodels[i]->dist_numE*2; j++){
+ cleanDouble(&res->models[i]->bmd_dist[j]);
+ }
+ }
+ for (int i=0; idist_numE*2; i++){
+ cleanDouble(&res->bmd_dist[i]);
+ }
+
+ //BMDSMA_results
+ cleanDouble(&bmdsRes->BMD_MA);
+ cleanDouble(&bmdsRes->BMDL_MA);
+ cleanDouble(&bmdsRes->BMDU_MA);
+ for (int i=0; inmodels; i++){
+ cleanDouble(&bmdsRes->BMD[i]);
+ cleanDouble(&bmdsRes->BMDL[i]);
+ cleanDouble(&bmdsRes->BMDU[i]);
+ }
+
+}
+
+
+
+//replace any double values containing NaN or infinity with BMDS_MISSING
+void cleanDouble(double *val){
+ if(!isfinite(*val)) {
+ *val = BMDS_MISSING;
+ }
+}
+
+
+string BMDS_ENTRY_API __stdcall version(){
+ return BMDS_VERSION;
+}
+
+
+int BMDS_ENTRY_API __stdcall add2(int i, int j) {
+ return i + j;
+}
+
+
+void convertToPythonDichoRes(struct dichotomous_model_result *res, struct python_dichotomous_model_result *pyRes){
+
+ pyRes->model = res->model;
+ pyRes->nparms = res->nparms;
+ pyRes->parms.assign(res->parms, res->parms + res->nparms);
+ pyRes->cov.assign(res->cov, res->cov + res->nparms*res->nparms);
+ pyRes->max = res->max;
+ pyRes->dist_numE = res->dist_numE;
+ pyRes->model_df = res->model_df;
+ pyRes->total_df = res->total_df;
+ pyRes->bmd_dist.assign(res->bmd_dist, res->bmd_dist + res->dist_numE*2);
+ pyRes->bmd = res->bmd;
+ pyRes->gof_p_value = res->gof_p_value;
+ pyRes->gof_chi_sqr_statistic = res->gof_chi_sqr_statistic;
+
+}
+
+void convertFromPythonDichoAnalysis(struct dichotomous_analysis *anal, struct python_dichotomous_analysis *pyAnal){
+
+ anal->model = pyAnal->model;
+ anal->n = pyAnal->n;
+ anal->BMD_type = pyAnal->BMD_type;
+ anal->BMR = pyAnal->BMR;
+ anal->alpha = pyAnal->alpha;
+ anal->degree = pyAnal->degree;
+ anal->samples = pyAnal->samples;
+ anal->burnin = pyAnal->burnin;
+ anal->parms = pyAnal->parms;
+ anal->prior_cols = pyAnal->prior_cols;
+
+ if(pyAnal->n == pyAnal->doses.size() && pyAnal->doses.size() == pyAnal->Y.size() && pyAnal->doses.size() == pyAnal->n_group.size()){
+ for (int i=0; in; i++){
+ anal->Y[i] = pyAnal->Y[i];
+ anal->doses[i] = pyAnal->doses[i];
+ anal->n_group[i] = pyAnal->n_group[i];
+ }
+ }
+ if(pyAnal->prior.size() > 0){
+ for (int i=0; iprior.size(); i++){
+ anal->prior[i] = pyAnal->prior[i];
+ }
+ }
+}
+
+void convertFromPythonDichoRes(struct dichotomous_model_result *res, struct python_dichotomous_model_result *pyRes){
+ res->model = pyRes->model;
+ res->nparms = pyRes->nparms;
+ res->max = pyRes->max;
+ res->dist_numE = pyRes->dist_numE;
+ res->model_df = pyRes->model_df;
+ res->total_df = pyRes->total_df;
+ res->bmd = pyRes->bmd;
+ res->gof_p_value = pyRes->gof_p_value;
+ res->gof_chi_sqr_statistic = pyRes->gof_chi_sqr_statistic;
+ //arrays & vectors
+ if(pyRes->parms.size() > 0){
+ for (int i=0; iparms.size(); i++){
+ res->parms[i] = pyRes->parms[i];
+ }
+ }
+ if(pyRes->cov.size() > 0){
+ for (int i=0; icov.size(); i++){
+ res->cov[i] = pyRes->cov[i];
+ }
+ }
+ if(pyRes->bmd_dist.size() > 0){
+ for (int i=0; ibmd_dist.size(); i++){
+ res->bmd_dist[i] = pyRes->bmd_dist[i];
+ }
+ }
+}
+
+void convertFromPythonDichoMAAnalysis(struct dichotomousMA_analysis *MA, struct python_dichotomousMA_analysis *pyMA){
+
+ MA->nmodels = pyMA->nmodels;
+ for (int i=0; inmodels; i++){
+ MA->priors[i] = pyMA->priors[i].data();
+ }
+ MA->nparms = pyMA->nparms.data();
+ MA->actual_parms = pyMA->actual_parms.data();
+ MA->prior_cols = pyMA->prior_cols.data();
+ MA->models = pyMA->models.data();
+ MA->modelPriors = pyMA->modelPriors.data();
+
+
+}
+
+void convertFromPythonDichoMARes(struct dichotomousMA_result *res, struct python_dichotomousMA_result *pyRes){
+
+ res->nmodels = pyRes->nmodels;
+ for (int i=0; inmodels; i++){
+ //res->models[i] = pyRes->models[i];
+ convertFromPythonDichoRes(res->models[i], &pyRes->models[i]);
+ }
+ res->dist_numE = pyRes->dist_numE;
+ res->post_probs = pyRes->post_probs.data();
+ res->bmd_dist = pyRes->bmd_dist.data();
+
+}
+
+void convertFromPythonContAnalysis(struct continuous_analysis *anal, struct python_continuous_analysis *pyAnal){
+
+ anal->model = pyAnal->model;
+ anal->n = pyAnal->n;
+ anal->BMD_type = pyAnal->BMD_type;
+ anal->isIncreasing = pyAnal->isIncreasing;
+ anal->BMR = pyAnal->BMR;
+ anal->tail_prob = pyAnal->tail_prob;
+ anal->disttype = pyAnal->disttype;
+ anal->alpha = pyAnal->alpha;
+ anal->degree = pyAnal->degree;
+ anal->samples = pyAnal->samples;
+ anal->burnin = pyAnal->burnin;
+ anal->parms = pyAnal->parms;
+ anal->prior_cols = pyAnal->prior_cols;
+ anal->transform_dose = pyAnal->transform_dose;
+ anal->suff_stat = pyAnal->suff_stat;
+
+ bool validated = false;
+ if (pyAnal->suff_stat){
+ validated = pyAnal->n == pyAnal->doses.size() && pyAnal->doses.size() == pyAnal->Y.size() && pyAnal->doses.size() == pyAnal->n_group.size();
+ } else {
+ validated = pyAnal->n == pyAnal->doses.size() && pyAnal->doses.size() == pyAnal->Y.size();
+ }
+
+ if (validated){
+ for (int i=0; in; i++){
+ anal->Y[i] = pyAnal->Y[i];
+ anal->doses[i] = pyAnal->doses[i];
+ }
+ }
+ if (validated && pyAnal->suff_stat){
+ for (int i=0; in; i++){
+ anal->n_group[i] = pyAnal->n_group[i];
+ anal->sd[i] = pyAnal->sd[i];
+ }
+ }
+ if(pyAnal->prior.size() > 0){
+ for (int i=0; iprior.size(); i++){
+ anal->prior[i] = pyAnal->prior[i];
+ }
+ }
+}
+
+void convertFromPythonContRes(struct continuous_model_result *res, struct python_continuous_model_result *pyRes){
+ res->model = pyRes->model;
+ res->dist = pyRes->dist;
+ res->nparms = pyRes->nparms;
+ res->max = pyRes->max;
+ res->dist_numE = pyRes->dist_numE;
+ res->model_df = pyRes->model_df;
+ res->total_df = pyRes->total_df;
+ res->bmd = pyRes->bmd;
+ //arrays & vectors
+ if(pyRes->parms.size() > 0){
+ for (int i=0; iparms.size(); i++){
+ res->parms[i] = pyRes->parms[i];
+ }
+ }
+ if(pyRes->cov.size() > 0){
+ for (int i=0; icov.size(); i++){
+ res->cov[i] = pyRes->cov[i];
+ }
+ }
+ if(pyRes->bmd_dist.size() > 0){
+ for (int i=0; ibmd_dist.size(); i++){
+ res->bmd_dist[i] = pyRes->bmd_dist[i];
+ }
+ }
+}
+
+void convertToPythonContRes(struct continuous_model_result *res, struct python_continuous_model_result *pyRes){
+
+ pyRes->model = res->model;
+ pyRes->dist = res->dist;
+ pyRes->nparms = res->nparms;
+ pyRes->parms.assign(res->parms, res->parms + res->nparms);
+ pyRes->cov.assign(res->cov, res->cov + res->nparms*res->nparms);
+ pyRes->max = res->max;
+ pyRes->dist_numE = res->dist_numE;
+ pyRes->model_df = res->model_df;
+ pyRes->total_df = res->total_df;
+ pyRes->bmd_dist.assign(res->bmd_dist, res->bmd_dist + res->dist_numE*2);
+ pyRes->bmd = res->bmd;
+
+}
+
+void BMDS_ENTRY_API __stdcall pythonBMDSDicho(struct python_dichotomous_analysis *pyAnal, struct python_dichotomous_model_result *pyRes){
+
+ //1st convert from python struct
+ dichotomous_analysis anal;
+ anal.Y = new double[pyAnal->n];
+ anal.doses = new double[pyAnal->n];
+ anal.n_group = new double[pyAnal->n];
+ anal.prior = new double[pyAnal->parms*pyAnal->prior_cols];
+ convertFromPythonDichoAnalysis(&anal, pyAnal);
+
+ dichotomous_model_result res;
+ res.parms = new double[pyRes->nparms];
+ res.cov = new double[pyRes->nparms*pyRes->nparms];
+ res.bmd_dist = new double[pyRes->dist_numE*2];
+ convertFromPythonDichoRes(&res, pyRes);
+
+ runBMDSDichoAnalysis(&anal, &res, &pyRes->gof, &pyRes->bmdsRes, &pyRes->aod);
+
+ convertToPythonDichoRes(&res, pyRes);
+
+}
+
+void BMDS_ENTRY_API __stdcall pythonBMDSDichoMA(struct python_dichotomousMA_analysis *pyMA, struct python_dichotomousMA_result *pyRes){
+
+//convert python_dichtomousMA_analysis to dichotomousMA_analysis
+ dichotomousMA_analysis MA;
+ MA.priors = new double *[pyMA->nmodels];
+ MA.nparms = new int[pyMA->nmodels];
+ MA.actual_parms = new int[pyMA->nmodels];
+ MA.prior_cols = new int[pyMA->nmodels];
+ MA.models = new int[pyMA->nmodels];
+ MA.modelPriors = new double[pyMA->nmodels];
+ convertFromPythonDichoMAAnalysis(&MA, pyMA);
+
+//convert python_dichotomous_analysis to dichotomous_analysis
+ dichotomous_analysis DA;
+ DA.Y = new double[pyMA->pyDA.n];
+ DA.doses = new double[pyMA->pyDA.n];
+ DA.n_group = new double[pyMA->pyDA.n];
+ DA.prior = new double[pyMA->pyDA.parms*pyMA->pyDA.prior_cols];
+ convertFromPythonDichoAnalysis(&DA, &pyMA->pyDA);
+
+// convertFromPythonDichoMARes(&res, pyRes);
+
+ struct dichotomous_model_result** res = new struct dichotomous_model_result* [pyRes->nmodels];
+ for (int i=0; inmodels; i++){
+ res[i] = (struct dichotomous_model_result*)malloc(sizeof(struct dichotomous_model_result));
+ res[i]->model = pyRes->models[i].model;
+ res[i]->nparms = pyRes->models[i].nparms;
+ res[i]->dist_numE = pyRes->models[i].dist_numE;
+ res[i]->parms = (double*)malloc(sizeof(double)*pyRes->models[i].nparms);
+ res[i]->cov = (double*)malloc(sizeof(double)*pyRes->models[i].nparms*pyRes->models[i].nparms);
+ res[i]->bmd_dist = (double*)malloc(sizeof(double)*pyRes->models[i].dist_numE*2);
+ }
+
+ struct dichotomousMA_result maRes;
+ maRes.nmodels = pyRes->nmodels;
+ maRes.models = res;
+ maRes.dist_numE = pyRes->dist_numE;
+ maRes.post_probs = new double[pyRes->nmodels];
+ maRes.bmd_dist = new double[pyRes->dist_numE*2];
+
+ runBMDSDichoMA(&MA, &DA, &maRes, &pyRes->bmdsRes);
+//convert back to python objs
+//pyDA should not change
+ pyRes->post_probs.resize(pyRes->nmodels);
+ for(int i=0; inmodels; i++){
+ pyRes->post_probs[i] = maRes.post_probs[i];
+ pyRes->models[i].max = maRes.models[i]->max;
+ pyRes->models[i].model_df = maRes.models[i]->model_df;
+ pyRes->models[i].total_df = maRes.models[i]->total_df;
+ pyRes->models[i].bmd = maRes.models[i]->bmd;
+ pyRes->models[i].gof_p_value = maRes.models[i]->gof_p_value;
+ pyRes->models[i].gof_chi_sqr_statistic = maRes.models[i]->gof_chi_sqr_statistic;
+ int nparms = pyRes->models[i].nparms;
+
+ pyRes->models[i].parms.resize(nparms);
+ for(int j=0; jmodels[i].parms[j] = maRes.models[i]->parms[j];
+ }
+ pyRes->models[i].cov.resize(nparms*nparms);
+ for(int j=0; jmodels[i].cov[j] = maRes.models[i]->cov[j];
+ }
+ pyRes->models[i].bmd_dist.resize(pyRes->dist_numE*2);
+ for(int j=0; jdist_numE*2; j++){
+ pyRes->models[i].bmd_dist[j] = maRes.models[i]->bmd_dist[j];
+ }
+ }
+ pyRes->bmd_dist.resize(pyRes->dist_numE*2);
+ for(int i=0; idist_numE*2; i++){
+ pyRes->bmd_dist[i] = maRes.bmd_dist[i];
+ }
+
+}
+
+void BMDS_ENTRY_API __stdcall pythonBMDSCont(struct python_continuous_analysis *pyAnal, struct python_continuous_model_result *pyRes){
+
+ //convert pyAnal to anal
+ continuous_analysis anal;
+ anal.Y = new double[pyAnal->n];
+ anal.doses = new double[pyAnal->n];
+ anal.sd = new double[pyAnal->n];
+ anal.n_group = new double[pyAnal->n];
+ anal.prior = new double[pyAnal->parms*pyAnal->prior_cols];
+ convertFromPythonContAnalysis(&anal, pyAnal);
+
+ //convert pyRes to res
+ continuous_model_result res;
+ res.parms = new double[pyRes->nparms];
+ res.cov = new double[pyRes->nparms*pyRes->nparms];
+ res.bmd_dist = new double[pyRes->dist_numE*2];
+ convertFromPythonContRes(&res, pyRes);
+
+ runBMDSContAnalysis(&anal, &res, &pyRes->bmdsRes, &pyRes->aod, &pyRes->gof, &pyAnal->detectAdvDir, &pyAnal->restricted);
+
+ convertToPythonContRes(&res, pyRes);
+
+}
+
+void BMDS_ENTRY_API __stdcall pythonBMDSMultitumor(struct python_multitumor_analysis *pyAnal, struct python_multitumor_result *pyRes){
+
+ //run each individual multistage model
+ for (int i=0;indatasets;i++){
+ //only all individual multistage models if degree == 0
+ //else only run the specified model
+ if (pyAnal->degree[i] ==0){
+ for (int j=0; jnmodels[i]; j++){
+ pythonBMDSDicho(&pyAnal->models[i][j], &pyRes->models[i][j]);
+ }
+ } else {
+ pythonBMDSDicho(&pyAnal->models[i][0], &pyRes->models[i][0]);
+ }
+ }
+
+ //select models
+ selectMultitumorModel(pyAnal, pyRes);
+
+ //create new pyAnal and pyRes only containing selected models
+ //Note: ndatasets may differ from previous structs (pyAnal & pyRes) depending on whether any datasets were rejected
+ struct python_multitumor_analysis anal;
+ struct python_multitumor_result res;
+
+
+ anal.n = pyAnal->n;
+ anal.BMR = pyAnal->BMR;
+ anal.alpha = pyAnal->alpha;
+ anal.prior_cols = pyAnal->prior_cols;
+ pyRes->validResult.clear();
+ for (int dataset=0; datasetndatasets; dataset++){
+ int selIndex = pyRes->selectedModelIndex[dataset];
+ if (selIndex >= 0){
+ pyRes->validResult.push_back(true);
+ std::vector modGroup;
+ std::vector modResGroup;
+ struct python_dichotomous_analysis modAnal;
+ struct python_dichotomous_model_result modRes;
+
+ anal.nmodels.push_back(1);
+ modAnal = pyAnal->models[dataset][selIndex];
+ modGroup.push_back(modAnal);
+ anal.degree.push_back(modAnal.degree);
+ anal.models.push_back(modGroup);
+ anal.n.push_back(modAnal.n);
+ anal.ndatasets++;
+
+ res.nmodels.push_back(1);
+ res.selectedModelIndex.push_back(0); //selected index is always zero since only one model
+ modRes = pyRes->models[dataset][selIndex];
+ modResGroup.push_back(modRes);
+ res.models.push_back(modResGroup);
+
+ } else {
+ //std::cout<<"Warning: multistage model not selected for dataset:"<validResult.push_back(false);
+ }
+ }
+ anal.ndatasets = anal.nmodels.size();
+ res.ndatasets = anal.ndatasets;
+
+// std::cout<<"running multitumor model with "<BMD = res.BMD;
+ pyRes->BMDL = res.BMDL;
+ pyRes->BMDU = res.BMDU;
+ pyRes->slopeFactor = res.slopeFactor;
+ pyRes->combined_LL = res.combined_LL;
+ pyRes->combined_LL_const = res.combined_LL_const;
+}
+
+
+void selectMultitumorModel(struct python_multitumor_analysis *pyAnal, struct python_multitumor_result *pyRes){
+
+
+ if (pyRes->selectedModelIndex.size() > 0){
+ pyRes->selectedModelIndex.clear();
+ }
+
+ for (int i=0; indatasets; i++){
+ if(pyAnal->degree[i] == 0){
+ int selectedIndex = selectBestMultitumorModel(pyAnal->models[i], pyRes->models[i]);
+ pyRes->selectedModelIndex.push_back(selectedIndex);
+ } else {
+ //in this case the only the user selected model was run
+ pyRes->selectedModelIndex.push_back(0);
+ }
+ }
+
+}
+
// Select the best-fitting multistage model for one dataset from the candidate
// degrees that were run, returning its index.  Visible criteria: a model is
// flagged when any parameter hit a bound (bmdsRes.bounded), and "adequate fit"
// requires |scaled residual| at the BMD dose and at the control dose to be
// below 2.0 and the GOF p-value to exceed 0.05 (all currently hardcoded).
// NOTE(review): large spans of this function (and the signature of the
// following calcSlopeFactor) were corrupted — text between '<' and '>' was
// stripped, merging lines (e.g. "for (int i=0; i bounded = ...").  The full
// selection logic (model iteration bounds, AIC comparison) cannot be read
// here; restore this block from version control before relying on it.
int selectBestMultitumorModel(std::vector &analModels, std::vector &resModels){

   double bmdDoseSRLimit = 2.0; // scaled residual at BMD dose < value
   double controlDoseSRLimit = 2.0; // scaled residual at control dose < value
   double pValueMin = 0.05; // p-value > value

   std::vector modelHitBoundary; //checks whether specific model hit boundary
   std::vector adequateFit;
   bool anyHitBoundary = false;
   bool anyAdequateFit = false;

   // NOTE(review): loop header corrupted — bounded-parameter scan
   for (int i=0; i bounded = resModels[i].bmdsRes.bounded;
   // NOTE(review): loop header corrupted — adequate-fit scan
   for (int j=0; j pValueMin
      //Note: these values are all currently hardcoded. Need to account for/allow user changes
      adequateFit.push_back(false);
      if (resModels[i].getSRAtDose(resModels[i].bmdsRes.BMD, analModels[i].doses) < bmdDoseSRLimit && resModels[i].gof.residual[0] < controlDoseSRLimit && resModels[i].gof.p_value > pValueMin){
         adequateFit[i] = true;
      }
   }


   //first only examine models up to k-2
   anyHitBoundary = false;
   anyAdequateFit = false;
   // NOTE(review): selection loops and AIC comparison corrupted below;
   // the remnant merges into the body of calcSlopeFactor (BMR/BMDL, with
   // BMDS_MISSING when BMDL is not positive)
   for (int i=0; i modelAIC;
   for (int i=0; i 0.0){
      return bmr/bmdl;
   } else {
      return BMDS_MISSING;
   }
}
+
+void BMDS_ENTRY_API __stdcall runMultitumorModel(struct python_multitumor_analysis *pyAnal, struct python_multitumor_result *pyRes){
+
+
+ //calculate slopeFactor for all individual models
+ for (int i=0; indatasets; i++){
+ for (int j=0; jnmodels[i]; j++){
+ double bmr = pyAnal->models[i][j].BMR;
+ pyRes->models[i][j].bmdsRes.setSlopeFactor(bmr);
+ }
+ }
+
+
+ //calculate combined LL & constant
+ pyRes->combined_LL = 0.0;
+ pyRes->combined_LL_const = 0.0;
+ for (int i=0; indatasets; i++){
+ int selIndex = pyRes->selectedModelIndex[i];
+ pyRes->combined_LL += pyRes->models[i][selIndex].max;
+ pyRes->combined_LL_const += LogLik_Constant(pyAnal->models[i][selIndex].Y, pyAnal->models[i][selIndex].n_group);
+ }
+ pyRes->combined_LL = pyRes->combined_LL*-1;
+ pyRes->combined_LL_const = pyRes->combined_LL_const*-1;
+
+
+ Multistage_ComboBMD(pyAnal, pyRes);
+
+ pyRes->setSlopeFactor(pyAnal->BMR);
+
+}
+
+
+
+
+
+void BMDS_ENTRY_API __stdcall pythonBMDSNested(struct python_nested_analysis *pyAnal, struct python_nested_result *pyRes){
+
+ //stubbed results
+ pyRes->bmdsRes.validResult = true;
+ pyRes->bmdsRes.BMD = 12.95166613;
+ pyRes->bmdsRes.BMDL = 9.643478831;
+ pyRes->bmdsRes.BMDU = (double)BMDS_MISSING;
+
+ pyRes->bmdsRes.AIC = 546.9572409;
+ pyRes->bmdsRes.chisq = 19.6087053;
+ pyRes->combPVal = 0.994;
+ pyRes->df = 35;
+
+ pyRes->nparms = 9;
+ pyRes->parms.push_back(0.084733516);
+ pyRes->parms.push_back(-4.109651594);
+ pyRes->parms.push_back(0.004761366);
+ pyRes->parms.push_back(-0.055489253);
+ pyRes->parms.push_back(1.0);
+ pyRes->parms.push_back(0);
+ pyRes->parms.push_back(0);
+ pyRes->parms.push_back(0);
+ pyRes->parms.push_back(0);
+
+ pyAnal->seed = 1687267999;
+ //pyAnal->iterations = 1000;
+ pyRes->LL = -269.478205;
+ pyRes->obsChiSq = 19.6087053;
+
+ pyRes->boot.numRuns = 3;
+ pyRes->boot.pVal.push_back(0.994);
+ pyRes->boot.pVal.push_back(0.997);
+ pyRes->boot.pVal.push_back(0.991);
+ pyRes->boot.pVal.push_back(0.994);
+ pyRes->boot.perc50.push_back(38.79787451);
+ pyRes->boot.perc50.push_back(38.19554318);
+ pyRes->boot.perc50.push_back(37.75643018);
+ pyRes->boot.perc50.push_back(38.26776644);
+ pyRes->boot.perc90.push_back(51.1848655);
+ pyRes->boot.perc90.push_back(50.4569082);
+ pyRes->boot.perc90.push_back(50.3883043);
+ pyRes->boot.perc90.push_back(50.6118335);
+ pyRes->boot.perc95.push_back(54.69182);
+ pyRes->boot.perc95.push_back(53.99859);
+ pyRes->boot.perc95.push_back(54.18472);
+ pyRes->boot.perc95.push_back(54.55846);
+ pyRes->boot.perc99.push_back(60.479415);
+ pyRes->boot.perc99.push_back(63.639965);
+ pyRes->boot.perc99.push_back(61.778094);
+ pyRes->boot.perc99.push_back(62.421371);
+
+ pyRes->SRs.push_back(-0.31484);
+ pyRes->SRs.push_back(0.314837);
+ pyRes->SRs.push_back(-0.31484);
+ pyRes->SRs.push_back(0.314837);
+ pyRes->SRs.push_back(-0.31484);
+ pyRes->SRs.push_back(0.314837);
+
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(0);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(25);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(50);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+ pyRes->litter.dose.push_back(100);
+
+ pyRes->litter.LSC.push_back(9);
+ pyRes->litter.LSC.push_back(9);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(13);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(15);
+ pyRes->litter.LSC.push_back(16);
+ pyRes->litter.LSC.push_back(9);
+ pyRes->litter.LSC.push_back(9);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(12);
+ pyRes->litter.LSC.push_back(13);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(7);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(15);
+ pyRes->litter.LSC.push_back(8);
+ pyRes->litter.LSC.push_back(10);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(11);
+ pyRes->litter.LSC.push_back(12);
+ pyRes->litter.LSC.push_back(12);
+ pyRes->litter.LSC.push_back(13);
+ pyRes->litter.LSC.push_back(14);
+ pyRes->litter.LSC.push_back(14);
+
+ pyRes->litter.estProb.push_back(0.127585814);
+ pyRes->litter.estProb.push_back(0.127585814);
+ pyRes->litter.estProb.push_back(0.13234718);
+ pyRes->litter.estProb.push_back(0.13234718);
+ pyRes->litter.estProb.push_back(0.137108547);
+ pyRes->litter.estProb.push_back(0.14663128);
+ pyRes->litter.estProb.push_back(0.151392646);
+ pyRes->litter.estProb.push_back(0.151392646);
+ pyRes->litter.estProb.push_back(0.156154013);
+ pyRes->litter.estProb.push_back(0.160915379);
+ pyRes->litter.estProb.push_back(0.301527034);
+ pyRes->litter.estProb.push_back(0.301527034);
+ pyRes->litter.estProb.push_back(0.297781775);
+ pyRes->litter.estProb.push_back(0.297781775);
+ pyRes->litter.estProb.push_back(0.294373053);
+ pyRes->litter.estProb.push_back(0.291294677);
+ pyRes->litter.estProb.push_back(0.288539894);
+ pyRes->litter.estProb.push_back(0.286101463);
+ pyRes->litter.estProb.push_back(0.286101463);
+ pyRes->litter.estProb.push_back(0.286101463);
+ pyRes->litter.estProb.push_back(0.433391608);
+ pyRes->litter.estProb.push_back(0.410232266);
+ pyRes->litter.estProb.push_back(0.410232266);
+ pyRes->litter.estProb.push_back(0.40315061);
+ pyRes->litter.estProb.push_back(0.40315061);
+ pyRes->litter.estProb.push_back(0.40315061);
+ pyRes->litter.estProb.push_back(0.40315061);
+ pyRes->litter.estProb.push_back(0.38390157);
+ pyRes->litter.estProb.push_back(0.38390157);
+ pyRes->litter.estProb.push_back(0.378161814);
+ pyRes->litter.estProb.push_back(0.572726279);
+ pyRes->litter.estProb.push_back(0.553298381);
+ pyRes->litter.estProb.push_back(0.543802864);
+ pyRes->litter.estProb.push_back(0.543802864);
+ pyRes->litter.estProb.push_back(0.534476942);
+ pyRes->litter.estProb.push_back(0.534476942);
+ pyRes->litter.estProb.push_back(0.52533783);
+ pyRes->litter.estProb.push_back(0.516402011);
+ pyRes->litter.estProb.push_back(0.516402011);
+
+ pyRes->litter.litterSize.push_back(9);
+ pyRes->litter.litterSize.push_back(9);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(13);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(15);
+ pyRes->litter.litterSize.push_back(16);
+ pyRes->litter.litterSize.push_back(9);
+ pyRes->litter.litterSize.push_back(9);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(12);
+ pyRes->litter.litterSize.push_back(13);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(7);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(15);
+ pyRes->litter.litterSize.push_back(8);
+ pyRes->litter.litterSize.push_back(10);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(11);
+ pyRes->litter.litterSize.push_back(12);
+ pyRes->litter.litterSize.push_back(12);
+ pyRes->litter.litterSize.push_back(13);
+ pyRes->litter.litterSize.push_back(14);
+ pyRes->litter.litterSize.push_back(14);
+
+ pyRes->litter.expected.push_back(1.148272326);
+ pyRes->litter.expected.push_back(1.148272326);
+ pyRes->litter.expected.push_back(1.323471805);
+ pyRes->litter.expected.push_back(1.323471805);
+ pyRes->litter.expected.push_back(1.508194016);
+ pyRes->litter.expected.push_back(1.906206638);
+ pyRes->litter.expected.push_back(2.119497049);
+ pyRes->litter.expected.push_back(2.119497049);
+ pyRes->litter.expected.push_back(2.342310192);
+ pyRes->litter.expected.push_back(2.574646069);
+ pyRes->litter.expected.push_back(2.713743308);
+ pyRes->litter.expected.push_back(2.713743308);
+ pyRes->litter.expected.push_back(2.977817749);
+ pyRes->litter.expected.push_back(2.977817749);
+ pyRes->litter.expected.push_back(3.238103583);
+ pyRes->litter.expected.push_back(3.495536119);
+ pyRes->litter.expected.push_back(3.751018618);
+ pyRes->litter.expected.push_back(4.005420479);
+ pyRes->litter.expected.push_back(4.005420479);
+ pyRes->litter.expected.push_back(4.005420479);
+ pyRes->litter.expected.push_back(3.033741255);
+ pyRes->litter.expected.push_back(4.102322662);
+ pyRes->litter.expected.push_back(4.102322662);
+ pyRes->litter.expected.push_back(4.434656714);
+ pyRes->litter.expected.push_back(4.434656714);
+ pyRes->litter.expected.push_back(4.434656714);
+ pyRes->litter.expected.push_back(4.434656714);
+ pyRes->litter.expected.push_back(5.374621982);
+ pyRes->litter.expected.push_back(5.374621982);
+ pyRes->litter.expected.push_back(5.672427209);
+ pyRes->litter.expected.push_back(4.581810233);
+ pyRes->litter.expected.push_back(5.532983808);
+ pyRes->litter.expected.push_back(5.981831502);
+ pyRes->litter.expected.push_back(5.981831502);
+ pyRes->litter.expected.push_back(6.413723301);
+ pyRes->litter.expected.push_back(6.413723301);
+ pyRes->litter.expected.push_back(6.829391788);
+ pyRes->litter.expected.push_back(7.229628148);
+ pyRes->litter.expected.push_back(7.229628148);
+
+ pyRes->litter.observed.push_back(0);
+ pyRes->litter.observed.push_back(1);
+ pyRes->litter.observed.push_back(1);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(3);
+ pyRes->litter.observed.push_back(3);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(1);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(1);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(3);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(3);
+ pyRes->litter.observed.push_back(2);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(5);
+ pyRes->litter.observed.push_back(4);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(8);
+ pyRes->litter.observed.push_back(8);
+ pyRes->litter.observed.push_back(7);
+ pyRes->litter.observed.push_back(6);
+ pyRes->litter.observed.push_back(6);
+
+ pyRes->litter.SR.push_back(-1.147257987);
+ pyRes->litter.SR.push_back(-0.148141348);
+ pyRes->litter.SR.push_back(-0.301860366);
+ pyRes->litter.SR.push_back(0.631328744);
+ pyRes->litter.SR.push_back(0.431109028);
+ pyRes->litter.SR.push_back(0.857594396);
+ pyRes->litter.SR.push_back(0.65653973);
+ pyRes->litter.SR.push_back(-0.089101984);
+ pyRes->litter.SR.push_back(-0.243481535);
+ pyRes->litter.SR.push_back(-1.071325161);
+ pyRes->litter.SR.push_back(-0.518421334);
+ pyRes->litter.SR.push_back(1.660602955);
+ pyRes->litter.SR.push_back(-0.676196332);
+ pyRes->litter.SR.push_back(-1.367732493);
+ pyRes->litter.SR.push_back(0.504037656);
+ pyRes->litter.SR.push_back(-0.314836859);
+ pyRes->litter.SR.push_back(1.376689417);
+ pyRes->litter.SR.push_back(1.179530167);
+ pyRes->litter.SR.push_back(-0.003205497);
+ pyRes->litter.SR.push_back(-0.594573329);
+ pyRes->litter.SR.push_back(-0.788462565);
+ pyRes->litter.SR.push_back(0.577118305);
+ pyRes->litter.SR.push_back(0.577118305);
+ pyRes->litter.SR.push_back(-0.267167737);
+ pyRes->litter.SR.push_back(-0.267167737);
+ pyRes->litter.SR.push_back(0.347496039);
+ pyRes->litter.SR.push_back(-0.267167737);
+ pyRes->litter.SR.push_back(-0.755412682);
+ pyRes->litter.SR.push_back(-0.205870559);
+ pyRes->litter.SR.push_back(0.174415333);
+ pyRes->litter.SR.push_back(0.298883377);
+ pyRes->litter.SR.push_back(-0.975099884);
+ pyRes->litter.SR.push_back(0.010998302);
+ pyRes->litter.SR.push_back(0.010998302);
+ pyRes->litter.SR.push_back(0.918022312);
+ pyRes->litter.SR.push_back(0.918022312);
+ pyRes->litter.SR.push_back(0.094758157);
+ pyRes->litter.SR.push_back(-0.65761782);
+ pyRes->litter.SR.push_back(-0.65761782);
+
+ pyRes->reduced.dose.push_back(0);
+ pyRes->reduced.dose.push_back(25);
+ pyRes->reduced.dose.push_back(50);
+ pyRes->reduced.dose.push_back(100);
+ pyRes->reduced.propAffect.push_back(0.14049587);
+ pyRes->reduced.propAffect.push_back(0.31034482);
+ pyRes->reduced.propAffect.push_back(0.38596491);
+ pyRes->reduced.propAffect.push_back(0.53333333);
+ pyRes->reduced.lowerConf.push_back(0.10100315);
+ pyRes->reduced.lowerConf.push_back(0.23266263);
+ pyRes->reduced.lowerConf.push_back(0.34156249);
+ pyRes->reduced.lowerConf.push_back(0.46300766);
+ pyRes->reduced.upperConf.push_back(0.19144442);
+ pyRes->reduced.upperConf.push_back(0.39977302);
+ pyRes->reduced.upperConf.push_back(0.43230026);
+ pyRes->reduced.upperConf.push_back(0.60240216);
+
+}
+
+
// Round `value` to the nearest multiple of `precision`
// (e.g. round_to(7.0, 2.0) == 8.0).
double round_to(double value, double precision ){
  return std::round(value / precision) * precision;
}
+
+
diff --git a/src/code_base/bmds_helper.h b/src/code_base/bmds_helper.h
new file mode 100644
index 00000000..d2cbf433
--- /dev/null
+++ b/src/code_base/bmds_helper.h
@@ -0,0 +1,472 @@
+#ifdef __cplusplus
+ #include
+#endif
+//#include "bmdStruct.h"
+#include "dichotomous_entry_code.h"
+
//define __stdcall to nothing if not on Windows platform
#ifndef WIN32
#define __stdcall
#endif

//define import/export attributes if on Windows platform
//(dllexport when building the library, dllimport when consuming it;
// both collapse to nothing for R builds and non-Windows targets)
#ifndef R_COMPILATION
# ifndef _WIN32
# define BMDS_ENTRY_API
# else
# ifdef RBMDS_EXPORTS
# define BMDS_ENTRY_API __declspec(dllexport)
# else
# define BMDS_ENTRY_API __declspec(dllimport)
# endif // BMDS_MODELS_EXPORTS
# endif
#else
# define BMDS_ENTRY_API
#endif

// numeric tolerance used for comparisons
const double BMDS_EPS = 1.0e-6;
// sentinel for "no value" (non-finite results are mapped to this)
const double BMDS_MISSING = -9999.0;
const double BMDS_QNORM = 1.959964;   //bound for 95% confidence interval
// library version string, defined in the implementation file
extern std::string BMDS_VERSION;

// nested dichotomous model identifiers
enum nested_model {nlogistic =1, nctr=2};

// BMDS helper structures
// fixed struct packing so the layout matches the python-side bindings
#ifdef _WIN64
#pragma pack(8)
#elif _WIN32
#pragma pack(4)
#endif

//forward declarations
double calcSlopeFactor(double bmr, double bmdl);
+
// Simple structure used for API round-trip/smoke tests.
struct test_struct{
  double BMD;                  // benchmark dose value
  int n;                       // number of doses
  bool validResult;            // whether the computation succeeded
  std::vector<double> doses;   // dose values (element type restored; it was
                               // stripped to "std::vector" in the review copy)
};
+// BMD_results:
+// Purpose - Contains various BMD values returned by BMDS.
+// It is used to facilitate returning results needed for BMDS software.
+struct BMDS_results{
+ double BMD;
+ double BMDL;
+ double BMDU;
+ double AIC;
+ double BIC_equiv;
+ double chisq;
+ std::vector bounded;
+ std::vector stdErr;
+ std::vector lowerConf;
+ std::vector upperConf;
+ bool validResult;
+ double slopeFactor;
+
+ void setSlopeFactor(double bmr){
+ slopeFactor = calcSlopeFactor(bmr, BMDL);
+ }
+};
+
// Model-averaging results: overall MA estimates plus per-model values and
// per-dose-group error bars.
// NOTE(review): vector element types were stripped in the review copy and
// restored here.
struct BMDSMA_results{
  double BMD_MA;
  double BMDL_MA;
  double BMDU_MA;
  std::vector<double> BMD;      // per-model BMD (size = number of models)
  std::vector<double> BMDL;     // per-model BMDL
  std::vector<double> BMDU;     // per-model BMDU
  std::vector<double> ebLower;  //size is number of dose groups
  std::vector<double> ebUpper;  //size is number of dose groups
};
+
//all arrays are length 4
// NOTE(review): vector element types were stripped in the review copy;
// restored as double — confirm DF's type against the upstream source.
struct testsOfInterest {
  std::vector<double> llRatio;  // likelihood-ratio statistics
  std::vector<double> DF;       // degrees of freedom
  std::vector<double> pVal;     // p-values
};
+
+
+//all arrays are of length 5
+//Likelihoods of Interest
+//indexing of arrays are:
+//0 - A1 Model
+//1 - A2 Model
+//2 - A3 Model
+//3 - fitted Model
+//4 - R Model
+struct continuous_AOD{
+ std::vector LL;
+ std::vector nParms;
+ std::vector AIC;
+ double addConst;
+ struct testsOfInterest TOI;
+};
+
// Analysis-of-deviance table for a dichotomous fit: log-likelihoods and
// parameter counts for the full, reduced, and fitted models, plus the
// deviance statistics comparing the fitted model against each.
struct dicho_AOD{
  double fullLL;  //A1; //fullLL - Full Model
  int nFull;      //N1; //NFull Full Model
  double redLL;   //A2; //redLL Reduced Model
  int nRed;       //N2; //NRed Reduced Model
  double fittedLL;  // log-likelihood of the fitted model
  int nFit;         // parameter count of the fitted model
  double devFit;    // deviance: fitted vs full
  double devRed;    // deviance: reduced vs full
  int dfFit;        // degrees of freedom for devFit
  int dfRed;        // degrees of freedom for devRed
  double pvFit;     // p-value for devFit
  double pvRed;     // p-value for devRed
};
+
+
//each array has number of dose groups in suff_stat data
// NOTE(review): vector element types were stripped in the review copy and
// restored as double (including size, which mirrors the other per-group
// columns) — confirm against the upstream source.
struct continuous_GOF {
  std::vector<double> dose;
  std::vector<double> size;
  std::vector<double> estMean;
  std::vector<double> calcMean;
  std::vector<double> obsMean;
  std::vector<double> estSD;
  std::vector<double> calcSD;
  std::vector<double> obsSD;
  std::vector<double> res;      // scaled residuals
  int n; //total # of obs/doses
  std::vector<double> ebLower;  // error-bar lower bounds
  std::vector<double> ebUpper;  // error-bar upper bounds
};
+
+struct dichotomous_GOF {
+ int n; // total number of observations obs/n
+ std::vector expected;
+ std::vector