[14.0][IMP] edi_oca: Do not retry exchange_send indefinitely #980

Open · wants to merge 2 commits into base: 14.0
23 changes: 19 additions & 4 deletions edi_oca/models/edi_backend.py
@@ -8,6 +8,7 @@
import base64
import logging
import traceback
from datetime import timedelta
from io import StringIO

from odoo import _, exceptions, fields, models, tools
@@ -301,10 +302,20 @@ def exchange_send(self, exchange_record):
_logger.debug("%s sent", exchange_record.identifier)
except self._send_retryable_exceptions() as err:
error = _get_exception_msg()
_logger.debug("%s send failed. To be retried.", exchange_record.identifier)
raise RetryableJobError(
error, **exchange_record._job_retry_params()
) from err
if self._send_should_retry(exchange_record):
Contributor:
Thinking out loud...
In theory a case where we spawn another job for the same record should not exist thanks to the identity_key.
Hence, I assume that jobs won't be retried infinitely if we reach the max retry job count.
If this does not happen it means the identity_key is not working and we end up w/ more than one job to send the same record.
Am I wrong?
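
A minimal sketch of the deduplication this comment refers to, assuming queue_job's identity_key support (the variable names below are illustrative, not necessarily how edi_oca enqueues its jobs):

from odoo.addons.queue_job.job import identity_exact

# Enqueue the send job with an identity key so that a second call for the
# same record reuses the pending job instead of creating a duplicate.
backend.with_delay(identity_key=identity_exact).exchange_send(exchange_record)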

Contributor:
@SilvioC2C ping

_logger.debug(
"%s send failed. To be retried.", exchange_record.identifier
)
raise RetryableJobError(
error, **exchange_record._job_retry_params()
) from err
else:
state = "output_error_on_send"
message = exchange_record._exchange_status_message("send_ko")
res = f"Error: {error}"
_logger.debug(
"%s send failed. Marked as errored.", exchange_record.identifier
)
except self._swallable_exceptions():
if self.env.context.get("_edi_send_break_on_error"):
raise
@@ -353,6 +364,10 @@ def _send_retryable_exceptions(self):
# when dealing w/ internal or external systems or filesystems
return (IOError, OSError)

def _send_should_retry(self, exc_record):
# Safety check not to retry indefinitely
return exc_record.create_date >= (fields.Datetime.now() - timedelta(days=1))

def _output_check_send(self, exchange_record):
if exchange_record.direction != "output":
raise exceptions.UserError(
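The new _send_should_retry hook gives downstream modules a single cutoff: records older than one day are no longer retried. A minimal sketch of how an extension module could widen that window by overriding the hook (the 7-day value and the override placement are illustrative assumptions):

from datetime import timedelta

from odoo import fields, models


class EdiBackend(models.Model):
    _inherit = "edi.backend"

    def _send_should_retry(self, exc_record):
        # Retry exchange records created within the last 7 days instead of
        # the 1-day window used by the base implementation.
        return exc_record.create_date >= (fields.Datetime.now() - timedelta(days=7))
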
33 changes: 33 additions & 0 deletions edi_oca/tests/test_backend_jobs.py
@@ -3,6 +3,7 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

import mock
from freezegun import freeze_time
from requests.exceptions import ConnectionError as ReqConnectionError

from odoo.addons.queue_job.exception import RetryableJobError
@@ -64,6 +65,38 @@ def test_output_fail_retry(self):
mocked.side_effect = ReqConnectionError("Connection broken")
with self.assertRaises(RetryableJobError):
job.perform()
self.assertEqual(record.edi_exchange_state, "output_pending")

def test_output_fail_too_many_retries(self):
job_counter = self.job_counter()
vals = {
"model": self.partner._name,
"res_id": self.partner.id,
"edi_exchange_state": "output_pending",
}
record = self.backend.create_record("test_csv_output", vals)
record._write({"create_date": "2024-01-10 09:00:00"})
record._set_file_content("ABC")
with mock.patch.object(type(self.backend), "_exchange_send") as mocked:
mocked.side_effect = ReqConnectionError("Connection broken")
with freeze_time("2024-01-10 11:00:00"):
# + 2 hours
job = self.backend.with_delay().exchange_send(record)
with self.assertRaises(RetryableJobError):
job.perform()
with freeze_time("2024-01-11 08:50:00"):
# + 23 hours and 50 minutes
job = self.backend.with_delay().exchange_send(record)
Contributor:
Why not use the same job you've got before?
However, if the identity_key works well, you should still get the same job here, but I think it is useless to call send again.
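
A minimal sketch of what reusing the first job could look like in this test (illustrative only, and it assumes the identity_key deduplication would hand back the same queue_job record anyway):

with freeze_time("2024-01-10 11:00:00"):
    job = self.backend.with_delay().exchange_send(record)
    with self.assertRaises(RetryableJobError):
        job.perform()
with freeze_time("2024-01-11 09:20:00"):
    # + 24 hours and 20 minutes: past the one-day cutoff, so perform()
    # marks the record as errored instead of raising RetryableJobError
    res = job.perform()
    self.assertIn("Error", res)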

Contributor:
@SilvioC2C ping

with self.assertRaises(RetryableJobError):
job.perform()
self.assertEqual(record.edi_exchange_state, "output_pending")
with freeze_time("2024-01-11 09:20:00"):
# + 24 hours and 20 minutes
job = self.backend.with_delay().exchange_send(record)
res = job.perform()
self.assertIn("Error", res)
job_counter.search_created()
Contributor:
this search seems useless as you don't use the result
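
One minimal way to act on that result instead of discarding it (a sketch; asserting an exact count would depend on how the identity_key deduplication behaves here):

created = job_counter.search_created()
self.assertTrue(created)  # at least one send job was enqueued during the test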

self.assertEqual(record.edi_exchange_state, "output_error_on_send")

def test_input(self):
job_counter = self.job_counter()