Norm the API (#873)
* Norm of return_report

* removing name generation from functions

* Update kmeans.py

* format

* Removing endogtest

 - the test is wrong and has no equivalent in other Python modules, so it is useless
oualib authored Nov 16, 2023
1 parent 374099c commit c07b6d5
Showing 20 changed files with 205 additions and 388 deletions.
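
The recurring change across these files is that internal calls to `fit()` now pass `return_report=True` explicitly. A hedged usage sketch of how the normalized call site reads (the table, column, and model names below are placeholders, not part of this commit):

```python
# Illustrative sketch only: the fit(..., return_report=True) pattern applied
# throughout this commit. Table and column names are placeholders.
from verticapy.machine_learning.vertica import LinearRegression

model = LinearRegression("lr_demo")
report = model.fit(
    "public.my_table",            # input relation
    ["feature_1", "feature_2"],   # predictor columns X
    "target",                     # response column y
    return_report=True,           # ask fit() for the training summary explicitly
)
```
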
12 changes: 0 additions & 12 deletions docs/source/stats.rst
@@ -21,18 +21,6 @@ Heteroscedascity
ols.het_goldfeldquandt
ols.het_white

____

Endogeneity
-------------

.. currentmodule:: verticapy.machine_learning.model_selection.statistical_tests

.. autosummary::
:toctree: api/

ols.endogtest


____

2 changes: 2 additions & 0 deletions verticapy/core/vdataframe/_corr.py
@@ -1993,6 +1993,7 @@ def pacf(
input_relation=tmp_view_name,
X=[f"lag_{i}_{gen_name([column])}" for i in range(1, p)],
y=column,
return_report=True,
)
model.predict(vdf, name="prediction_0")
drop(tmp_lr1_name, method="model")
@@ -2001,6 +2002,7 @@
input_relation=tmp_view_name,
X=[f"lag_{i}_{gen_name([column])}" for i in range(1, p)],
y=f"lag_{p}_{gen_name([column])}",
return_report=True,
)
model.predict(vdf, name="prediction_p")
vdf.eval(expr=f"{column} - prediction_0", name="eps_0")
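
For context, the pacf() code touched above implements the regression-residual definition of partial autocorrelation: regress both the series and its p-th lag on the intermediate lags, then correlate the two residual series. A minimal NumPy sketch of the same idea (an illustration, not VerticaPy code):

```python
# NumPy illustration of the regression-residual definition of pacf(p):
# regress x_t and x_{t-p} on lags 1..p-1, then correlate the residuals
# (eps_0 and eps_p in the diff above).
import numpy as np

def pacf_by_regression(x: np.ndarray, p: int) -> float:
    if p < 2:
        raise ValueError("sketch assumes p >= 2; pacf(1) is the lag-1 autocorrelation")
    n = len(x) - p
    target = x[p:]       # x_t
    far_lag = x[:n]      # x_{t-p}
    # design matrix: intercept plus the intermediate lags x_{t-1}, ..., x_{t-p+1}
    lags = np.column_stack([np.ones(n)] + [x[p - j : p - j + n] for j in range(1, p)])
    beta_0, *_ = np.linalg.lstsq(lags, target, rcond=None)
    beta_p, *_ = np.linalg.lstsq(lags, far_lag, rcond=None)
    eps_0 = target - lags @ beta_0    # residual of x_t after removing the intermediate lags
    eps_p = far_lag - lags @ beta_p   # residual of x_{t-p} after removing the intermediate lags
    return float(np.corrcoef(eps_0, eps_p)[0, 1])

x = np.cumsum(np.random.default_rng(0).normal(size=500))  # toy random-walk series
print(pacf_by_regression(x, 3))
```
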
7 changes: 6 additions & 1 deletion verticapy/core/vdataframe/_encoding.py
@@ -301,7 +301,12 @@ def discretize(
model.set_params(RFmodel_params)
parameters = model.get_params()
try:
model.fit(tmp_view_name, [self._alias], response)
model.fit(
tmp_view_name,
[self._alias],
response,
return_report=True,
)
query = [
f"""
(SELECT
7 changes: 6 additions & 1 deletion verticapy/machine_learning/model_selection/hp_tuning/cv.py
@@ -719,7 +719,12 @@ def bayesian_search_cv(
hyper_param_estimator = vml.RandomForestRegressor(
name=estimator.model_name, **RFmodel_params
)
hyper_param_estimator.fit(relation, all_params, "score")
hyper_param_estimator.fit(
relation,
all_params,
"score",
return_report=True,
)
if random_grid:
vdf = gen_dataset(model_grid, nrows=nrows)
else:
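
The bayesian_search_cv change above fits a RandomForestRegressor on previously evaluated parameter sets and their scores, i.e. a surrogate model of the objective. A toy sketch of that surrogate step outside Vertica (assumption: this mirrors only the surrogate idea, not the full search loop):

```python
# Toy surrogate-model step: learn (hyperparameters -> score) from past trials,
# then rank a random grid of new candidates by predicted score.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
tried = rng.uniform(0.01, 1.0, size=(20, 2))        # past trials, e.g. (learning_rate, subsample)
scores = -((tried - 0.3) ** 2).sum(axis=1)          # toy objective standing in for real cv scores

surrogate = RandomForestRegressor(n_estimators=50, random_state=0)
surrogate.fit(tried, scores)

candidates = rng.uniform(0.01, 1.0, size=(200, 2))  # random grid of untried parameter sets
top = candidates[np.argsort(surrogate.predict(candidates))[::-1][:5]]
print(top)                                           # most promising candidates to evaluate next
```
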
12 changes: 10 additions & 2 deletions verticapy/machine_learning/model_selection/kmeans.py
@@ -139,7 +139,11 @@ def best_k(
max_iter=max_iter,
tol=tol,
)
model.fit(input_relation, X)
model.fit(
input_relation,
X,
return_report=True,
)
score = model.elbow_score_
if score > elbow_score_stop:
return i
@@ -266,7 +270,11 @@ def elbow(
max_iter=max_iter,
tol=tol,
)
model.fit(input_relation, X)
model.fit(
input_relation,
X,
return_report=True,
)
elbow_score += [float(model.elbow_score_)]
between_cluster_ss += [float(model.between_cluster_ss_)]
total_ss += [float(model.total_ss_)]
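
best_k() is the user-facing entry point for the loop shown above: it fits KMeans for increasing k and stops at the first elbow score above elbow_score_stop. A hedged usage sketch (the table, columns, and the n_cluster range are placeholders; parameter names follow the arguments visible in the diff):

```python
# Hedged usage sketch of the elbow-based k selection; names are placeholders.
from verticapy.machine_learning.model_selection import best_k

k = best_k(
    "public.my_table",
    X=["feature_1", "feature_2"],
    n_cluster=(1, 15),        # assumed range of k values to try
    elbow_score_stop=0.8,     # stop at the first k whose elbow score exceeds this
)
print(k)
```
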
@@ -229,7 +229,13 @@ def cross_validate(
test_size=float(1 / cv), order_by=[X[0]], random_state=random_state
)
start_time = time.time()
estimator.fit(train, X, y, test)
estimator.fit(
train,
X,
y,
test,
return_report=True,
)
total_time += [time.time() - start_time]
fun = estimator.report
kwargs = {"metrics": final_metrics}
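
cross_validate(), modified above, fits the estimator on each train split and scores the held-out fold through the model report. A hedged usage sketch (relation and column names are placeholders):

```python
# Hedged usage sketch of cross_validate; relation and column names are placeholders.
from verticapy.machine_learning.model_selection import cross_validate
from verticapy.machine_learning.vertica import LinearRegression

result = cross_validate(
    LinearRegression("cv_demo"),
    "public.my_table",
    X=["feature_1", "feature_2"],
    y="target",
    cv=3,                     # number of folds, matching the test_size = 1 / cv split above
)
print(result)
```
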
@@ -24,7 +24,6 @@
seasonal_decompose,
)
from verticapy.machine_learning.model_selection.statistical_tests.ols import (
endogtest,
het_breuschpagan,
het_goldfeldquandt,
het_white,
