[14.0][IMP] edi_oca: Do not retry exchange_send indefinitely #980
base: 14.0
@@ -3,6 +3,7 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

import mock
from freezegun import freeze_time
from requests.exceptions import ConnectionError as ReqConnectionError

from odoo.addons.queue_job.exception import RetryableJobError

@@ -64,6 +65,38 @@ def test_output_fail_retry(self):
            mocked.side_effect = ReqConnectionError("Connection broken")
            with self.assertRaises(RetryableJobError):
                job.perform()
            self.assertEqual(record.edi_exchange_state, "output_pending")

    def test_output_fail_too_many_retries(self):
        job_counter = self.job_counter()
        vals = {
            "model": self.partner._name,
            "res_id": self.partner.id,
            "edi_exchange_state": "output_pending",
        }
        record = self.backend.create_record("test_csv_output", vals)
        record._write({"create_date": "2024-01-10 09:00:00"})
        record._set_file_content("ABC")
        with mock.patch.object(type(self.backend), "_exchange_send") as mocked:
            mocked.side_effect = ReqConnectionError("Connection broken")
            with freeze_time("2024-01-10 11:00:00"):
                # + 2 hours
                job = self.backend.with_delay().exchange_send(record)
                with self.assertRaises(RetryableJobError):
                    job.perform()
            with freeze_time("2024-01-11 08:50:00"):
                # + 23 hours and 50 minutes
                job = self.backend.with_delay().exchange_send(record)

Review comment (on the line above): Why not use the same job you've got before?
Reply: @SilvioC2C ping

                with self.assertRaises(RetryableJobError):
                    job.perform()
                self.assertEqual(record.edi_exchange_state, "output_pending")
            with freeze_time("2024-01-11 09:20:00"):
                # + 24 hours and 20 minutes
                job = self.backend.with_delay().exchange_send(record)
                res = job.perform()
                self.assertIn("Error", res)
        job_counter.search_created()

Review comment (on the line above): This search seems useless, as you don't use the result.

        self.assertEqual(record.edi_exchange_state, "output_error_on_send")

    def test_input(self):
        job_counter = self.job_counter()
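The new test pins the exchange record's create_date to 2024-01-10 09:00:00 and then performs the send job at +2 hours, +23 h 50 m, and +24 h 20 m: the first two attempts must still raise RetryableJobError and leave the record in output_pending, while the third must return an error result and move the state to output_error_on_send. As a rough illustration of the behaviour exercised here (not the actual patch), a cutoff on the record's age could look like the sketch below; the 24-hour window and the helper name are assumptions.

# A minimal sketch, not the actual patch: the test above expects exchange_send
# to keep raising RetryableJobError while the exchange record is recent, and to
# give up (state "output_error_on_send") once the record is older than a given
# window. The 24-hour window and the helper name are illustrative assumptions.
from datetime import timedelta

from odoo import fields
from odoo.addons.queue_job.exception import RetryableJobError

RETRY_WINDOW = timedelta(hours=24)  # assumed cutoff, matching the test's timestamps


def _handle_send_error(exchange_record, exc):
    """Retry while the record is recent, otherwise flag it as errored."""
    age = fields.Datetime.now() - exchange_record.create_date
    if age < RETRY_WINDOW:
        # queue_job reschedules the job when it sees RetryableJobError.
        raise RetryableJobError(str(exc)) from exc
    # Too old: stop retrying and surface the error on the record instead.
    exchange_record.edi_exchange_state = "output_error_on_send"
    return "Error: %s" % exc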
Review comment:
Thinking out loud... In theory, a case where we spawn another job for the same record should not exist, thanks to the identity_key. Hence, I assume that jobs won't be retried indefinitely once the job's max retry count is reached. If this does not happen, it means the identity_key is not working and we end up with more than one job to send the same record. Am I wrong?
Reply: @SilvioC2C ping
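For context on the identity_key / max_retries point above, here is a minimal usage sketch of queue_job's job de-duplication and retry cap. The identity_exact helper and the with_delay() parameters are standard queue_job features, but the retry count and the wrapper function are illustrative assumptions rather than edi_oca's actual configuration.

from odoo.addons.queue_job.job import identity_exact


def enqueue_send(backend, record):
    # identity_key de-duplicates jobs: while a job with the same identity key
    # is still pending or enqueued, calling with_delay() again for the same
    # record does not create a duplicate. max_retries caps how many times a
    # RetryableJobError can reschedule the job before it is marked as failed.
    return backend.with_delay(
        identity_key=identity_exact,
        max_retries=5,  # arbitrary value for illustration
    ).exchange_send(record)

With such an identity key in place, a second exchange_send job for the same record should not exist, and once max_retries is exceeded the job is marked as failed instead of being rescheduled, which is the scenario the comment above is questioning.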