diff --git a/lumibot/data_sources/data_source.py b/lumibot/data_sources/data_source.py
index 8b9b6148..b3651230 100644
--- a/lumibot/data_sources/data_source.py
+++ b/lumibot/data_sources/data_source.py
@@ -70,7 +70,9 @@ def get_historical_prices(
         self, asset, length, timestep="", timeshift=None, quote=None, exchange=None, include_after_hours=True
     ) -> Bars:
         """
-        Get bars for a given asset
+        Get bars for a given asset, going back in time from now, getting length number of bars by timestep.
+        For example, with a length of 10, a timestep of "1day", and no timeshift, this
+        would return the last 10 daily bars.
 
         Parameters
         ----------
diff --git a/lumibot/data_sources/tradier_data.py b/lumibot/data_sources/tradier_data.py
index bc87d436..aab051de 100644
--- a/lumibot/data_sources/tradier_data.py
+++ b/lumibot/data_sources/tradier_data.py
@@ -1,6 +1,6 @@
 import logging
 from collections import defaultdict
-from datetime import datetime, date
+from datetime import datetime, date, timedelta
 
 import pandas as pd
 import pytz
@@ -204,8 +204,8 @@ def get_historical_prices(
         if timestep == 'day' and timeshift is None:
             # What we really want is the last n bars, not the bars from the last n days.
-            # get twice as many days as we need to ensure we get enough bars
-            tcal_start_date = end_date - (td * length * 2)
+            # get twice as many days as we need to ensure we get enough bars, then add 3 days for long weekends
+            tcal_start_date = end_date - (td * length * 2 + timedelta(days=3))
             trading_days = get_trading_days(market='NYSE', start_date=tcal_start_date, end_date=end_date)
             # Filer out trading days when the market_open is after the end_date
             trading_days = trading_days[trading_days['market_open'] < end_date]
 
@@ -242,7 +242,7 @@
 
         # if type of index is date, convert it to timestamp with timezone info of "America/New_York"
         if isinstance(df.index[0], date):
-            df.index = pd.to_datetime(df.index, utc=True).tz_convert("America/New_York")
+            df.index = pd.to_datetime(df.index).tz_localize("America/New_York")
 
         # Convert the dataframe to a Bars object
         bars = Bars(df, self.SOURCE, asset, raw=df, quote=quote)
diff --git a/lumibot/data_sources/yahoo_data.py b/lumibot/data_sources/yahoo_data.py
index 43777e45..2d079112 100644
--- a/lumibot/data_sources/yahoo_data.py
+++ b/lumibot/data_sources/yahoo_data.py
@@ -85,7 +85,7 @@ def _pull_source_symbol_bars(
         data = self._append_data(asset, data)
 
         if timestep == "day":
-            # Get the last minute of self._datetime to get the current bar
+            # Get the previous day's bar
             dt = self._datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
             end = dt - timedelta(days=1)
         else:
diff --git a/lumibot/strategies/strategy_executor.py b/lumibot/strategies/strategy_executor.py
index 80f2b34f..b046a97a 100644
--- a/lumibot/strategies/strategy_executor.py
+++ b/lumibot/strategies/strategy_executor.py
@@ -867,6 +867,8 @@ def _run_trading_session(self):
         if not broker_continue:
             return
 
+        # TODO: I think we should remove the OR. Pandas data can have dividends,
+        # especially if it was saved from Yahoo.
         if not has_data_source or (has_data_source and self.broker.data_source.SOURCE != "PANDAS"):
             self.strategy._update_cash_with_dividends()
 
diff --git a/lumibot/tools/pandas.py b/lumibot/tools/pandas.py
index 4e304d87..608613e1 100644
--- a/lumibot/tools/pandas.py
+++ b/lumibot/tools/pandas.py
@@ -55,7 +55,7 @@ def print_full_pandas_dataframes():
     pd.set_option('display.width', 1000)
 
 
-def set_pandas_float_precision(precision: int = 5):
+def set_pandas_float_display_precision(precision: int = 5):
     format_str = '{:.' + str(precision) + 'f}'
     pd.set_option('display.float_format', format_str.format)
diff --git a/lumibot/tools/polygon_helper.py b/lumibot/tools/polygon_helper.py
index 3325a04c..469203f4 100644
--- a/lumibot/tools/polygon_helper.py
+++ b/lumibot/tools/polygon_helper.py
@@ -411,6 +411,20 @@ def get_missing_dates(df_all, asset, start, end):
     dates = pd.Series(df_all.index.date).unique()
     missing_dates = sorted(set(trading_dates) - set(dates))
 
+    # TODO: This code works AFAIK, but when I enable it the tests for "test_polygon_missing_day_caching" fail and
+    # I don't know why nor how to fix this code or the tests. So I'm leaving it disabled for now. If you have problems
+    # with NaNs in cached Polygon data, you can try to enable this code and fix the tests.
+
+    # # Find any dates with nan values in the df_all DataFrame
+    # missing_dates += df_all[df_all.isnull().all(axis=1)].index.date.tolist()
+    #
+    # # make sure the dates are unique
+    # missing_dates = list(set(missing_dates))
+    # missing_dates.sort()
+    #
+    # # finally, filter out any dates that are not in start/end range (inclusive)
+    # missing_dates = [d for d in missing_dates if start.date() <= d <= end.date()]
+
     return missing_dates
diff --git a/setup.py b/setup.py
index a4e3e878..df342be7 100644
--- a/setup.py
+++ b/setup.py
@@ -47,7 +47,7 @@
         "appdirs",
         "pyarrow",
         "tqdm",
-        "lumiwealth-tradier>=0.1.12",
+        "lumiwealth-tradier>=0.1.14",
         "pytz",
         "psycopg2-binary",
         "exchange_calendars>=4.5.2",
diff --git a/tests/test_bars.py b/tests/test_bars.py
deleted file mode 100644
index 46546545..00000000
--- a/tests/test_bars.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import os
-from datetime import datetime, timedelta
-import logging
-
-import pytest
-
-import pandas as pd
-import pytz
-from pandas.testing import assert_series_equal
-
-from lumibot.backtesting import PolygonDataBacktesting
-from lumibot.data_sources import AlpacaData, TradierData, YahooData, PandasData
-from tests.fixtures import pandas_data_fixture
-from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_precision
-from lumibot.entities import Asset
-
-# Global parameters
-# API Key for testing Polygon.io
-from lumibot.credentials import POLYGON_API_KEY
-from lumibot.credentials import TRADIER_CONFIG, ALPACA_CONFIG
-
-
-logger = logging.getLogger(__name__)
-# print_full_pandas_dataframes()
-# set_pandas_float_precision(precision=15)
-
-
-class TestDatasourceDailyBars:
-    """These tests check that the Bars returned from get_historical_prices.
-
-    They test:
-    - the index is a timestamp
-    - they contain returns for the different data sources.
-    - they return the right number of bars
-    - returns are calculated correctly
-    - certain data_sources contain dividends
-
-    """
-
-    length = 30
-    ticker = "SPY"
-    asset = Asset("SPY")
-    timestep = "day"
-    expected_df = None
-    backtesting_start = datetime(2019, 3, 1)
-    backtesting_end = datetime(2019, 3, 31)
-
-    @classmethod
-    def setup_class(cls):
-        # We load the SPY data directly and calculate the adjusted returns.
-        file_path = os.getcwd() + "/data/SPY.csv"
-        df = pd.read_csv(file_path)
-        df.rename(columns={"Date": "date"}, inplace=True)
-        df['date'] = pd.to_datetime(df['date'])
-        df.set_index('date', inplace=True)
-        df['expected_return'] = df['Adj Close'].pct_change()
-        cls.expected_df = df
-
-    # @pytest.mark.skip()
-    @pytest.mark.skipif(not ALPACA_CONFIG['API_KEY'], reason="This test requires an alpaca API key")
-    @pytest.mark.skipif(ALPACA_CONFIG['API_KEY'] == '', reason="This test requires an alpaca API key")
-    def test_alpaca_data_source_daily_bars(self):
-        """
-        Among other things, this tests that the alpaca data_source calculates SIMPLE returns for bars.
-        Since we don't get dividends with alpaca, we are not going to check if the returns are adjusted correctly.
-        """
-        data_source = AlpacaData(ALPACA_CONFIG)
-        prices = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep)
-
-        assert isinstance(prices.df.index[0], pd.Timestamp)
-        # assert prices.df.index[0].tzinfo.zone == "America/New_York"  # Note, this is different from all others
-        assert prices.df.index[0].tzinfo == pytz.timezone("UTC")
-        assert len(prices.df) == self.length
-
-        assert isinstance(prices.df.index[0], pd.Timestamp)
-
-        # assert that the last row has a return value
-        assert prices.df["return"].iloc[-1] is not None
-
-        # check that there is no dividend column... This test will fail when dividends are added. We hope that's soon.
-        assert "dividend" not in prices.df.columns
-
-    # @pytest.mark.skip()
-    def test_yahoo_data_source_daily_bars(self):
-        """
-        This tests that the yahoo data_source calculates adjusted returns for bars and that they
-        are calculated correctly.
-        """
-        start = self.backtesting_start + timedelta(days=25)
-        end = self.backtesting_end + timedelta(days=25)
-        data_source = YahooData(datetime_start=start, datetime_end=end)
-        prices = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep)
-
-        assert isinstance(prices.df.index[0], pd.Timestamp)
-        assert prices.df.index[0].tzinfo.zone == "America/New_York"
-        assert len(prices.df) == self.length
-
-        # assert that the last row has a return value
-        assert prices.df["return"].iloc[-1] is not None
-
-        # check that there is a dividend column.
-        assert "dividend" in prices.df.columns
-
-        # assert that there was a dividend paid on 3/15
-        assert prices.df["dividend"].loc["2019-03-15"] != 0.0
-
-        # make a new dataframe where the index is Date and the columns are the actual returns
-        actual_df = pd.DataFrame(columns=["actual_return"])
-        for dt, row in prices.df.iterrows():
-            actual_return = row["return"]
-            actual_df.loc[dt.date()] = {
-                "actual_return": actual_return,
-            }
-
-        comparison_df = pd.concat(
-            [actual_df["actual_return"],
-             self.expected_df["expected_return"]],
-            axis=1).reindex(actual_df.index)
-
-        comparison_df = comparison_df.dropna()
-        # print(f"\n{comparison_df}")
-
-        # check that the returns are adjusted correctly
-        assert_series_equal(
-            comparison_df["actual_return"],
-            comparison_df["expected_return"],
-            check_names=False,
-            check_index=True,
-            atol=1e-4,
-            rtol=0
-        )
-
-    # @pytest.mark.skip()
-    def test_pandas_data_source_daily_bars(self, pandas_data_fixture):
-        """
-        This tests that the pandas data_source calculates adjusted returns for bars and that they
-        are calculated correctly. It assumes that it is provided split adjusted OHLCV and dividend data.
- """ - start = self.backtesting_start + timedelta(days=25) - end = self.backtesting_end + timedelta(days=25) - data_source = PandasData( - datetime_start=start, - datetime_end=end, - pandas_data=pandas_data_fixture - ) - prices = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) - assert isinstance(prices.df.index[0], pd.Timestamp) - assert prices.df.index[0].tzinfo.zone == "America/New_York" - assert len(prices.df) == self.length - assert prices.df["return"].iloc[-1] is not None - - # check that there is a dividend column. - assert "dividend" in prices.df.columns - - # assert that there was a dividend paid on 3/15 - assert prices.df["dividend"].loc["2019-03-15"] != 0.0 - - # make a new dataframe where the index is Date and the columns are the actual returns - actual_df = pd.DataFrame(columns=["actual_return"]) - for dt, row in prices.df.iterrows(): - actual_return = row["return"] - actual_df.loc[dt.date()] = { - "actual_return": actual_return, - } - - comparison_df = pd.concat( - [actual_df["actual_return"], - self.expected_df["expected_return"]], - axis=1).reindex(actual_df.index) - - comparison_df = comparison_df.dropna() - # print(f"\n{comparison_df}") - - # check that the returns are adjusted correctly - assert_series_equal( - comparison_df["actual_return"], - comparison_df["expected_return"], - check_names=False, - check_index=True, - atol=1e-4, - rtol=0 - ) - - # @pytest.mark.skip() - @pytest.mark.skipif(POLYGON_API_KEY == '', reason="This test requires a Polygon.io API key") - def test_polygon_data_source_daily_bars(self): - """ - This tests that the po broker calculates SIMPLE returns for bars. Since we don't get dividends with - alpaca, we are not going to check if the returns are adjusted correctly. - """ - # get data from 3 months ago, so we can use the free Polygon.io data - start = datetime.now() - timedelta(days=90) - end = datetime.now() - timedelta(days=60) - tzinfo = pytz.timezone("America/New_York") - start = start.astimezone(tzinfo) - end = end.astimezone(tzinfo) - - data_source = PolygonDataBacktesting( - start, end, api_key=POLYGON_API_KEY - ) - prices = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) - - assert isinstance(prices.df.index[0], pd.Timestamp) - assert prices.df.index[0].tzinfo.zone == "America/New_York" - assert len(prices.df) == self.length - - # assert that the last row has a return value - assert prices.df["return"].iloc[-1] is not None - - assert isinstance(prices.df.index[0], pd.Timestamp) - - @pytest.mark.skipif(not TRADIER_CONFIG['ACCESS_TOKEN'], reason="No Tradier credentials provided.") - def test_tradier_data_source_generates_simple_returns(self): - """ - This tests that the po broker calculates SIMPLE returns for bars. Since we don't get dividends with - tradier, we are not going to check if the returns are adjusted correctly. - """ - data_source = TradierData( - account_number=TRADIER_CONFIG["ACCOUNT_NUMBER"], - access_token=TRADIER_CONFIG["ACCESS_TOKEN"], - paper=TRADIER_CONFIG["PAPER"], - ) - - prices = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) - - assert isinstance(prices.df.index[0], pd.Timestamp) - assert prices.df.index[0].tzinfo.zone == "America/New_York" - assert len(prices.df) == self.length - - # This shows a bug. 
The index a datetime.date but should be a timestamp - assert isinstance(prices.df.index[0], pd.Timestamp) - - # assert that the last row has a return value - assert prices.df["return"].iloc[-1] is not None diff --git a/tests/test_drift_rebalancer.py b/tests/test_drift_rebalancer.py index 46ef1677..f1483126 100644 --- a/tests/test_drift_rebalancer.py +++ b/tests/test_drift_rebalancer.py @@ -13,11 +13,11 @@ from lumibot.backtesting import BacktestingBroker, YahooDataBacktesting, PandasDataBacktesting from lumibot.strategies.strategy import Strategy from tests.fixtures import pandas_data_fixture -from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_precision +from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_display_precision from lumibot.entities import Order print_full_pandas_dataframes() -set_pandas_float_precision(precision=5) +set_pandas_float_display_precision(precision=5) class MockStrategyWithDriftCalculationLogic(Strategy): diff --git a/tests/test_get_historical_prices.py b/tests/test_get_historical_prices.py new file mode 100644 index 00000000..da43e666 --- /dev/null +++ b/tests/test_get_historical_prices.py @@ -0,0 +1,244 @@ +import os +from datetime import datetime, timedelta +import logging + +import pytest + +import pandas as pd +import pytz +from pandas.testing import assert_series_equal + +from lumibot.backtesting import PolygonDataBacktesting, YahooDataBacktesting +from lumibot.data_sources import AlpacaData, TradierData, PandasData +from tests.fixtures import pandas_data_fixture +from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_display_precision +from lumibot.entities import Asset, Bars +from lumibot.tools import get_trading_days + +# Global parameters +# API Key for testing Polygon.io +from lumibot.credentials import POLYGON_API_KEY +from lumibot.credentials import TRADIER_CONFIG, ALPACA_CONFIG + + +logger = logging.getLogger(__name__) +print_full_pandas_dataframes() +set_pandas_float_display_precision() + + +def check_bars( + *, + bars: Bars, + length: int = 30, + check_timezone: bool = True, +): + """ + This tests: + - the right number of bars are retrieved + - the index is a timestamp + - optionally checks the timezone of the index (alpaca is incorrect) + - the bars contain returns + """ + assert len(bars.df) == length + assert isinstance(bars.df.index[-1], pd.Timestamp) + + if check_timezone: + assert bars.df.index[-1].tzinfo.zone == "America/New_York" + + assert bars.df["return"].iloc[-1] is not None + + +class TestDatasourceBacktestingGetHistoricalPricesDailyData: + """These tests check the daily Bars returned from get_historical_prices for backtesting data sources.""" + + length = 30 + ticker = "SPY" + asset = Asset("SPY") + timestep = "day" + + @classmethod + def setup_class(cls): + pass + + # noinspection PyMethodMayBeStatic + def check_date_of_last_bar_is_date_of_last_trading_date_before_backtest_start( + self, bars: Bars, + backtesting_start: datetime + ): + # The current behavior of the backtesting data sources is to return the data for the + # last trading day before now. In this case, "now" is the backtesting_start date. + # So based on the backtesting_start date, the last bar should be the bar from the previous trading day. 
+        previous_trading_day_date = get_trading_days(
+            market="NYSE",
+            start_date=backtesting_start - timedelta(days=5),
+            end_date=backtesting_start - timedelta(days=1)
+        ).index[-1].date()
+        assert bars.df.index[-1].date() == previous_trading_day_date
+
+    # noinspection PyMethodMayBeStatic
+    def check_dividends_and_adjusted_returns(self, bars):
+        assert "dividend" in bars.df.columns
+        assert bars.df["dividend"].iloc[-1] is not None
+
+        # assert that there was a dividend paid on 3/15
+        assert bars.df["dividend"].loc["2019-03-15"] != 0.0
+
+        # make a new dataframe where the index is Date and the columns are the actual returns
+        actual_df = pd.DataFrame(columns=["actual_return"])
+        for dt, row in bars.df.iterrows():
+            actual_return = row["return"]
+            actual_df.loc[dt.date()] = {
+                "actual_return": actual_return,
+            }
+
+        # We load the SPY data directly and calculate the adjusted returns.
+        file_path = os.getcwd() + "/data/SPY.csv"
+        expected_df = pd.read_csv(file_path)
+        expected_df.rename(columns={"Date": "date"}, inplace=True)
+        expected_df['date'] = pd.to_datetime(expected_df['date'])
+        expected_df.set_index('date', inplace=True)
+        expected_df['expected_return'] = expected_df['Adj Close'].pct_change()
+
+        comparison_df = pd.concat(
+            [actual_df["actual_return"],
+             expected_df["expected_return"]],
+            axis=1).reindex(actual_df.index)
+
+        comparison_df = comparison_df.dropna()
+        # print(f"\n{comparison_df}")
+
+        # check that the returns are adjusted correctly
+        assert_series_equal(
+            comparison_df["actual_return"],
+            comparison_df["expected_return"],
+            check_names=False,
+            check_index=True,
+            atol=1e-4,
+            rtol=0
+        )
+
+    def test_pandas_backtesting_data_source_get_historical_prices_daily_bars(self, pandas_data_fixture):
+        """
+        This tests that the pandas data_source calculates adjusted returns for bars and that they
+        are calculated correctly. It assumes that it is provided split adjusted OHLCV and dividend data.
+        """
+        backtesting_start = datetime(2019, 3, 26)
+        backtesting_end = datetime(2019, 4, 25)
+        data_source = PandasData(
+            datetime_start=backtesting_start,
+            datetime_end=backtesting_end,
+            pandas_data=pandas_data_fixture
+        )
+        bars = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep)
+        check_bars(bars=bars, length=self.length)
+        self.check_date_of_last_bar_is_date_of_last_trading_date_before_backtest_start(bars, backtesting_start=backtesting_start)
+        self.check_dividends_and_adjusted_returns(bars)
+
+    # @pytest.mark.skip()
+    @pytest.mark.skipif(POLYGON_API_KEY == '', reason="This test requires a Polygon.io API key")
+    def test_polygon_backtesting_data_source_get_historical_prices_daily_bars(self):
+        backtesting_end = datetime.now() - timedelta(days=1)
+        backtesting_start = backtesting_end - timedelta(days=self.length * 2 + 5)
+        data_source = PolygonDataBacktesting(
+            backtesting_start, backtesting_end, api_key=POLYGON_API_KEY
+        )
+        bars = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep)
+        check_bars(bars=bars, length=self.length)
+        self.check_date_of_last_bar_is_date_of_last_trading_date_before_backtest_start(bars, backtesting_start=backtesting_start)
+
+    def test_yahoo_backtesting_data_source_get_historical_prices_daily_bars(self, pandas_data_fixture):
+        """
+        This tests that the yahoo data_source calculates adjusted returns for bars and that they
+        are calculated correctly. It assumes that it is provided split adjusted OHLCV and dividend data.
+ """ + backtesting_start = datetime(2019, 3, 25) + backtesting_end = datetime(2019, 4, 25) + data_source = YahooDataBacktesting( + datetime_start=backtesting_start, + datetime_end=backtesting_end, + pandas_data=pandas_data_fixture + ) + bars = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) + check_bars(bars=bars, length=self.length) + self.check_dividends_and_adjusted_returns(bars) + self.check_date_of_last_bar_is_date_of_last_trading_date_before_backtest_start(bars, backtesting_start=backtesting_start) + + +# @pytest.mark.skip() +class TestDatasourceGetHistoricalPricesDailyData: + """These tests check the daily Bars returned from get_historical_prices for live data sources.""" + + length = 30 + ticker = "SPY" + asset = Asset("SPY") + timestep = "day" + now = datetime.now().astimezone(pytz.timezone("America/New_York")) + today = now.date() + trading_days = get_trading_days(market="NYSE", start_date=datetime.now() - timedelta(days=7)) + + @classmethod + def setup_class(cls): + pass + + def check_date_of_last_bar_is_correct_for_live_data_sources(self, bars): + """ + Weird test: the results depend on the date and time the test is run. + If you ask for one bar before the market is closed, you should get the bar from the last trading day. + If you ask for one bar while the market is open, you should get an incomplete bar for the current day. + If you ask for one bar after the market is closed, you should get a complete bar from the current trading day. + """ + + if self.today in self.trading_days.index.date: + market_open = self.trading_days.loc[str(self.today), 'market_open'] + + if self.now < market_open: + # if now is before market open, the bar should from previous trading day + assert bars.df.index[-1].date() == self.trading_days.index[-2].date() + else: + # if now is after market open, the bar should be from today + assert bars.df.index[-1].date() == self.trading_days.index[-1].date() + + else: + # if it's not a trading day, the last bar the bar should from the last trading day + assert bars.df.index[-1].date() == self.trading_days.index[-1].date() + + # @pytest.mark.skip() + @pytest.mark.skipif(not ALPACA_CONFIG['API_KEY'], reason="This test requires an alpaca API key") + @pytest.mark.skipif( + ALPACA_CONFIG['API_KEY'] == '', + reason="This test requires an alpaca API key" + ) + def test_alpaca_data_source_get_historical_prices_daily_bars(self): + data_source = AlpacaData(ALPACA_CONFIG) + bars = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) + + # Alpaca's time zone is UTC. 
We should probably convert it to America/New_York + # Alpaca data source does not provide dividends + check_bars(bars=bars, length=self.length, check_timezone=False) + self.check_date_of_last_bar_is_correct_for_live_data_sources(bars) + + # TODO: convert the timezones returned by alpaca to America/New_York + assert bars.df.index[0].tzinfo == pytz.timezone("UTC") + + # This simulates what the call to get_yesterday_dividends does (lookback of 1) + bars = data_source.get_historical_prices(asset=self.asset, length=1, timestep=self.timestep) + check_bars(bars=bars, length=1, check_timezone=False) + self.check_date_of_last_bar_is_correct_for_live_data_sources(bars) + + # @pytest.mark.skip() + @pytest.mark.skipif(not TRADIER_CONFIG['ACCESS_TOKEN'], reason="No Tradier credentials provided.") + def test_tradier_data_source_get_historical_prices_daily_bars(self): + data_source = TradierData( + account_number=TRADIER_CONFIG["ACCOUNT_NUMBER"], + access_token=TRADIER_CONFIG["ACCESS_TOKEN"], + paper=TRADIER_CONFIG["PAPER"], + ) + + bars = data_source.get_historical_prices(asset=self.asset, length=self.length, timestep=self.timestep) + check_bars(bars=bars, length=self.length) + self.check_date_of_last_bar_is_correct_for_live_data_sources(bars) + + # This simulates what the call to get_yesterday_dividends does (lookback of 1) + bars = data_source.get_historical_prices(asset=self.asset, length=1, timestep=self.timestep) + check_bars(bars=bars, length=1) + self.check_date_of_last_bar_is_correct_for_live_data_sources(bars) diff --git a/tests/test_momentum.py b/tests/test_momentum.py index 713e8a40..1f73b6bd 100644 --- a/tests/test_momentum.py +++ b/tests/test_momentum.py @@ -11,12 +11,12 @@ from lumibot.strategies import Strategy from lumibot.backtesting import PandasDataBacktesting, YahooDataBacktesting, PolygonDataBacktesting from tests.fixtures import pandas_data_fixture -from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_precision +from lumibot.tools import print_full_pandas_dataframes, set_pandas_float_display_precision logger = logging.getLogger(__name__) # print_full_pandas_dataframes() -# set_pandas_float_precision(precision=15) +# set_pandas_float_display_precision(precision=15) class MomoTester(Strategy):
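A minimal usage sketch of the daily-bar semantics that the updated docstring and the new tests describe, assuming a Yahoo data source configured the same way as in the tests above (the length of 10 and the asserted columns are illustrative, not a guaranteed API):

from datetime import datetime

from lumibot.data_sources import YahooData
from lumibot.entities import Asset

# Sketch only: any data source exposing get_historical_prices() is expected to behave this way.
data_source = YahooData(datetime_start=datetime(2019, 3, 25), datetime_end=datetime(2019, 4, 25))

# length=10, timestep="day", no timeshift -> the last 10 daily bars counting back from "now"
# (for backtesting sources, "now" is the backtest cursor, so the last bar is the previous trading day).
bars = data_source.get_historical_prices(asset=Asset("SPY"), length=10, timestep="day")

assert len(bars.df) == 10                    # exactly `length` bars, not `length` calendar days
assert bars.df.index[-1].tzinfo is not None  # the index should be a timezone-aware pandas Timestamp
assert "return" in bars.df.columns           # simple or dividend-adjusted returns, depending on the source

Where the source provides dividends (Yahoo, Pandas), bars.df also carries a "dividend" column, which is what the adjusted-return checks in the new tests rely on.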