diff --git a/README.md b/README.md
index 2a252b4eaed8..6ca7185523d5 100644
--- a/README.md
+++ b/README.md
@@ -175,11 +175,11 @@ For more information about the supported formats, see:
| [Kitti Raw Format](https://www.cvlibs.net/datasets/kitti/raw_data.php) | ✔️ | ✔️ |
| [LFW](http://vis-www.cs.umass.edu/lfw/) | ✔️ | ✔️ |
| [Supervisely Point Cloud Format](https://docs.supervise.ly/data-organization/00_ann_format_navi) | ✔️ | ✔️ |
-| [YOLOv8 Detection](https://docs.ultralytics.com/datasets/detect/) | ✔️ | ✔️ |
-| [YOLOv8 Oriented Bounding Boxes](https://docs.ultralytics.com/datasets/obb/) | ✔️ | ✔️ |
-| [YOLOv8 Segmentation](https://docs.ultralytics.com/datasets/segment/) | ✔️ | ✔️ |
-| [YOLOv8 Pose](https://docs.ultralytics.com/datasets/pose/) | ✔️ | ✔️ |
-| [YOLOv8 Classification](https://docs.ultralytics.com/datasets/classify/) | ✔️ | ✔️ |
+| [Ultralytics YOLO Detection](https://docs.ultralytics.com/datasets/detect/) | ✔️ | ✔️ |
+| [Ultralytics YOLO Oriented Bounding Boxes](https://docs.ultralytics.com/datasets/obb/) | ✔️ | ✔️ |
+| [Ultralytics YOLO Segmentation](https://docs.ultralytics.com/datasets/segment/) | ✔️ | ✔️ |
+| [Ultralytics YOLO Pose](https://docs.ultralytics.com/datasets/pose/) | ✔️ | ✔️ |
+| [Ultralytics YOLO Classification](https://docs.ultralytics.com/datasets/classify/) | ✔️ | ✔️ |
diff --git a/cvat/apps/dataset_manager/formats/yolo.py b/cvat/apps/dataset_manager/formats/yolo.py
index 887232b8e666..1a138557c862 100644
--- a/cvat/apps/dataset_manager/formats/yolo.py
+++ b/cvat/apps/dataset_manager/formats/yolo.py
@@ -77,53 +77,53 @@ def _import_yolo(*args, **kwargs):
_import_common(*args, format_name="yolo", **kwargs)
-@exporter(name='YOLOv8 Detection', ext='ZIP', version='1.0')
-def _export_yolov8_detection(*args, **kwargs):
- _export_common(*args, format_name='yolov8_detection', **kwargs)
+@exporter(name='Ultralytics YOLO Detection', ext='ZIP', version='1.0')
+def _export_yolo_ultralytics_detection(*args, **kwargs):
+ _export_common(*args, format_name='yolo_ultralytics_detection', **kwargs)
-@exporter(name='YOLOv8 Oriented Bounding Boxes', ext='ZIP', version='1.0')
-def _export_yolov8_oriented_boxes(*args, **kwargs):
- _export_common(*args, format_name='yolov8_oriented_boxes', **kwargs)
+@exporter(name='Ultralytics YOLO Oriented Bounding Boxes', ext='ZIP', version='1.0')
+def _export_yolo_ultralytics_oriented_boxes(*args, **kwargs):
+ _export_common(*args, format_name='yolo_ultralytics_oriented_boxes', **kwargs)
-@exporter(name='YOLOv8 Segmentation', ext='ZIP', version='1.0')
-def _export_yolov8_segmentation(dst_file, temp_dir, instance_data, *, save_images=False):
+@exporter(name='Ultralytics YOLO Segmentation', ext='ZIP', version='1.0')
+def _export_yolo_ultralytics_segmentation(dst_file, temp_dir, instance_data, *, save_images=False):
with GetCVATDataExtractor(instance_data, include_images=save_images) as extractor:
dataset = Dataset.from_extractors(extractor, env=dm_env)
dataset = dataset.transform('masks_to_polygons')
- dataset.export(temp_dir, 'yolov8_segmentation', save_images=save_images)
+ dataset.export(temp_dir, 'yolo_ultralytics_segmentation', save_images=save_images)
make_zip_archive(temp_dir, dst_file)
-@exporter(name='YOLOv8 Pose', ext='ZIP', version='1.0')
-def _export_yolov8_pose(*args, **kwargs):
- _export_common(*args, format_name='yolov8_pose', **kwargs)
+@exporter(name='Ultralytics YOLO Pose', ext='ZIP', version='1.0')
+def _export_yolo_ultralytics_pose(*args, **kwargs):
+ _export_common(*args, format_name='yolo_ultralytics_pose', **kwargs)
-@exporter(name='YOLOv8 Classification', ext='ZIP', version='1.0')
-def _export_yolov8_classification(*args, **kwargs):
- _export_common(*args, format_name='yolov8_classification', **kwargs)
+@exporter(name='Ultralytics YOLO Classification', ext='ZIP', version='1.0')
+def _export_yolo_ultralytics_classification(*args, **kwargs):
+ _export_common(*args, format_name='yolo_ultralytics_classification', **kwargs)
-@importer(name='YOLOv8 Detection', ext="ZIP", version="1.0")
-def _import_yolov8_detection(*args, **kwargs):
- _import_common(*args, format_name="yolov8_detection", **kwargs)
+@importer(name='Ultralytics YOLO Detection', ext="ZIP", version="1.0")
+def _import_yolo_ultralytics_detection(*args, **kwargs):
+ _import_common(*args, format_name="yolo_ultralytics_detection", **kwargs)
-@importer(name='YOLOv8 Segmentation', ext="ZIP", version="1.0")
-def _import_yolov8_segmentation(*args, **kwargs):
- _import_common(*args, format_name="yolov8_segmentation", **kwargs)
+@importer(name='Ultralytics YOLO Segmentation', ext="ZIP", version="1.0")
+def _import_yolo_ultralytics_segmentation(*args, **kwargs):
+ _import_common(*args, format_name="yolo_ultralytics_segmentation", **kwargs)
-@importer(name='YOLOv8 Oriented Bounding Boxes', ext="ZIP", version="1.0")
-def _import_yolov8_oriented_boxes(*args, **kwargs):
- _import_common(*args, format_name="yolov8_oriented_boxes", **kwargs)
+@importer(name='Ultralytics YOLO Oriented Bounding Boxes', ext="ZIP", version="1.0")
+def _import_yolo_ultralytics_oriented_boxes(*args, **kwargs):
+ _import_common(*args, format_name="yolo_ultralytics_oriented_boxes", **kwargs)
-@importer(name='YOLOv8 Pose', ext="ZIP", version="1.0")
-def _import_yolov8_pose(src_file, temp_dir, instance_data, **kwargs):
+@importer(name='Ultralytics YOLO Pose', ext="ZIP", version="1.0")
+def _import_yolo_ultralytics_pose(src_file, temp_dir, instance_data, **kwargs):
with GetCVATDataExtractor(instance_data) as extractor:
point_categories = extractor.categories().get(AnnotationType.points)
label_categories = extractor.categories().get(AnnotationType.label)
@@ -135,12 +135,12 @@ def _import_yolov8_pose(src_file, temp_dir, instance_data, **kwargs):
src_file,
temp_dir,
instance_data,
- format_name="yolov8_pose",
+ format_name="yolo_ultralytics_pose",
import_kwargs=dict(skeleton_sub_labels=true_skeleton_point_labels),
**kwargs
)
-@importer(name='YOLOv8 Classification', ext="ZIP", version="1.0")
-def _import_yolov8_classification(*args, **kwargs):
- _import_common(*args, format_name="yolov8_classification", **kwargs)
+@importer(name='Ultralytics YOLO Classification', ext="ZIP", version="1.0")
+def _import_yolo_ultralytics_classification(*args, **kwargs):
+ _import_common(*args, format_name="yolo_ultralytics_classification", **kwargs)
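
All of the renamed formats follow the same registration pattern: a format is declared once for export and once for import with the `exporter`/`importer` decorators, whose `name` and `version` make up the display name shown by the UI and REST API, while `format_name` selects the Datumaro plugin that does the actual conversion. A minimal sketch of that pattern, using a hypothetical `my_format` id that is not part of this change (the import paths assume the layout of `cvat/apps/dataset_manager/formats/`):

```python
from cvat.apps.dataset_manager.formats.registry import exporter, importer
from cvat.apps.dataset_manager.formats.yolo import _export_common, _import_common


# "My Format 1.0" is the display name clients select; 'my_format' is the
# (hypothetical) Datumaro format id that performs the conversion.
@exporter(name='My Format', ext='ZIP', version='1.0')
def _export_my_format(*args, **kwargs):
    _export_common(*args, format_name='my_format', **kwargs)


@importer(name='My Format', ext='ZIP', version='1.0')
def _import_my_format(*args, **kwargs):
    _import_common(*args, format_name='my_format', **kwargs)
```
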
diff --git a/cvat/apps/dataset_manager/tests/assets/annotations.json b/cvat/apps/dataset_manager/tests/assets/annotations.json
index 9f7c27b94bcb..a0c9e8ff96d5 100644
--- a/cvat/apps/dataset_manager/tests/assets/annotations.json
+++ b/cvat/apps/dataset_manager/tests/assets/annotations.json
@@ -976,7 +976,7 @@
],
"tracks": []
},
- "YOLOv8 Classification 1.0": {
+ "Ultralytics YOLO Classification 1.0": {
"version": 0,
"tags": [
{
@@ -990,7 +990,7 @@
"shapes": [],
"tracks": []
},
- "YOLOv8 Detection 1.0": {
+ "Ultralytics YOLO Detection 1.0": {
"version": 0,
"tags": [],
"shapes": [
@@ -1008,7 +1008,7 @@
],
"tracks": []
},
- "YOLOv8 Oriented Bounding Boxes 1.0": {
+ "Ultralytics YOLO Oriented Bounding Boxes 1.0": {
"version": 0,
"tags": [],
"shapes": [
@@ -1027,7 +1027,7 @@
],
"tracks": []
},
- "YOLOv8 Segmentation 1.0": {
+ "Ultralytics YOLO Segmentation 1.0": {
"version": 0,
"tags": [],
"shapes": [
@@ -1045,7 +1045,7 @@
],
"tracks": []
},
- "YOLOv8 Pose 1.0": {
+ "Ultralytics YOLO Pose 1.0": {
"version": 0,
"tags": [],
"shapes": [
diff --git a/cvat/apps/dataset_manager/tests/assets/tasks.json b/cvat/apps/dataset_manager/tests/assets/tasks.json
index 2c29ce712929..ad68c6f5aa5f 100644
--- a/cvat/apps/dataset_manager/tests/assets/tasks.json
+++ b/cvat/apps/dataset_manager/tests/assets/tasks.json
@@ -634,8 +634,8 @@
}
]
},
- "YOLOv8 Pose 1.0": {
- "name": "YOLOv8 pose task",
+ "Ultralytics YOLO Pose 1.0": {
+ "name": "Ultralytics YOLO pose task",
"overlap": 0,
"segment_size": 100,
"labels": [
diff --git a/cvat/apps/dataset_manager/tests/test_formats.py b/cvat/apps/dataset_manager/tests/test_formats.py
index 91a3081ca089..dd88cde416cc 100644
--- a/cvat/apps/dataset_manager/tests/test_formats.py
+++ b/cvat/apps/dataset_manager/tests/test_formats.py
@@ -292,11 +292,11 @@ def test_export_formats_query(self):
'LFW 1.0',
'Cityscapes 1.0',
'Open Images V6 1.0',
- 'YOLOv8 Classification 1.0',
- 'YOLOv8 Oriented Bounding Boxes 1.0',
- 'YOLOv8 Detection 1.0',
- 'YOLOv8 Pose 1.0',
- 'YOLOv8 Segmentation 1.0',
+ 'Ultralytics YOLO Classification 1.0',
+ 'Ultralytics YOLO Oriented Bounding Boxes 1.0',
+ 'Ultralytics YOLO Detection 1.0',
+ 'Ultralytics YOLO Pose 1.0',
+ 'Ultralytics YOLO Segmentation 1.0',
})
def test_import_formats_query(self):
@@ -329,11 +329,11 @@ def test_import_formats_query(self):
'Open Images V6 1.0',
'Datumaro 1.0',
'Datumaro 3D 1.0',
- 'YOLOv8 Classification 1.0',
- 'YOLOv8 Oriented Bounding Boxes 1.0',
- 'YOLOv8 Detection 1.0',
- 'YOLOv8 Pose 1.0',
- 'YOLOv8 Segmentation 1.0',
+ 'Ultralytics YOLO Classification 1.0',
+ 'Ultralytics YOLO Oriented Bounding Boxes 1.0',
+ 'Ultralytics YOLO Detection 1.0',
+ 'Ultralytics YOLO Pose 1.0',
+ 'Ultralytics YOLO Segmentation 1.0',
})
def test_exports(self):
@@ -383,11 +383,11 @@ def test_empty_images_are_exported(self):
# ('KITTI 1.0', 'kitti') format does not support empty annotations
('LFW 1.0', 'lfw'),
# ('Cityscapes 1.0', 'cityscapes'), does not support, empty annotations
- ('YOLOv8 Classification 1.0', 'yolov8_classification'),
- ('YOLOv8 Oriented Bounding Boxes 1.0', 'yolov8_oriented_boxes'),
- ('YOLOv8 Detection 1.0', 'yolov8_detection'),
- ('YOLOv8 Pose 1.0', 'yolov8_pose'),
- ('YOLOv8 Segmentation 1.0', 'yolov8_segmentation'),
+ ('Ultralytics YOLO Classification 1.0', 'yolo_ultralytics_classification'),
+            ('Ultralytics YOLO Oriented Bounding Boxes 1.0', 'yolo_ultralytics_oriented_boxes'),
+ ('Ultralytics YOLO Detection 1.0', 'yolo_ultralytics_detection'),
+ ('Ultralytics YOLO Pose 1.0', 'yolo_ultralytics_pose'),
+ ('Ultralytics YOLO Segmentation 1.0', 'yolo_ultralytics_segmentation'),
]:
with self.subTest(format=format_name):
if not dm.formats.registry.EXPORT_FORMATS[format_name].ENABLED:
diff --git a/cvat/apps/dataset_manager/tests/test_rest_api_formats.py b/cvat/apps/dataset_manager/tests/test_rest_api_formats.py
index 50883826b5a5..f3640b835bcb 100644
--- a/cvat/apps/dataset_manager/tests/test_rest_api_formats.py
+++ b/cvat/apps/dataset_manager/tests/test_rest_api_formats.py
@@ -55,12 +55,12 @@
DEFAULT_ATTRIBUTES_FORMATS = [
"VGGFace2 1.0",
"WiderFace 1.0",
- "YOLOv8 Classification 1.0",
+ "Ultralytics YOLO Classification 1.0",
"YOLO 1.1",
- "YOLOv8 Detection 1.0",
- "YOLOv8 Segmentation 1.0",
- "YOLOv8 Oriented Bounding Boxes 1.0",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Detection 1.0",
+ "Ultralytics YOLO Segmentation 1.0",
+ "Ultralytics YOLO Oriented Bounding Boxes 1.0",
+ "Ultralytics YOLO Pose 1.0",
"PASCAL VOC 1.1",
"Segmentation mask 1.1",
"ImageNet 1.0",
@@ -411,7 +411,7 @@ def test_api_v2_dump_and_upload_annotations_with_objects_type_is_shape(self):
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[dump_format_name], images)
else:
@@ -469,7 +469,7 @@ def test_api_v2_dump_and_upload_annotations_with_objects_type_is_shape(self):
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[upload_format_name], images)
else:
@@ -513,7 +513,7 @@ def test_api_v2_dump_annotations_with_objects_type_is_track(self):
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[dump_format_name], video)
else:
@@ -569,7 +569,7 @@ def test_api_v2_dump_annotations_with_objects_type_is_track(self):
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[upload_format_name], video)
else:
@@ -846,7 +846,7 @@ def test_api_v2_export_dataset(self):
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[dump_format_name], images)
else:
@@ -947,7 +947,7 @@ def test_api_v2_rewriting_annotations(self):
if dump_format_name in [
"Market-1501 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
- "ICDAR Segmentation 1.0", "COCO Keypoints 1.0", "YOLOv8 Pose 1.0",
+ "ICDAR Segmentation 1.0", "COCO Keypoints 1.0", "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[dump_format_name], images)
else:
@@ -1058,7 +1058,7 @@ def test_api_v2_tasks_annotations_dump_and_upload_with_datumaro(self):
"Market-1501 1.0", "Cityscapes 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "COCO Keypoints 1.0",
- "YOLOv8 Pose 1.0",
+ "Ultralytics YOLO Pose 1.0",
]:
task = self._create_task(tasks[dump_format_name], images)
else:
diff --git a/cvat/apps/engine/tests/test_rest_api.py b/cvat/apps/engine/tests/test_rest_api.py
index e6ed6b6c0303..b0c5500eda4c 100644
--- a/cvat/apps/engine/tests/test_rest_api.py
+++ b/cvat/apps/engine/tests/test_rest_api.py
@@ -6127,13 +6127,13 @@ def _get_initial_annotation(annotation_format):
elif annotation_format == "YOLO 1.1":
annotations["shapes"] = rectangle_shapes_wo_attrs
- elif annotation_format == "YOLOv8 Detection 1.0":
+ elif annotation_format == "Ultralytics YOLO Detection 1.0":
annotations["shapes"] = rectangle_shapes_wo_attrs
- elif annotation_format == "YOLOv8 Oriented Bounding Boxes 1.0":
+ elif annotation_format == "Ultralytics YOLO Oriented Bounding Boxes 1.0":
annotations["shapes"] = rectangle_shapes_wo_attrs
- elif annotation_format == "YOLOv8 Segmentation 1.0":
+ elif annotation_format == "Ultralytics YOLO Segmentation 1.0":
annotations["shapes"] = polygon_shapes_wo_attrs
elif annotation_format == "COCO 1.0":
@@ -6493,7 +6493,10 @@ def etree_to_dict(t):
self.assertEqual(meta["task"]["name"], task["name"])
elif format_name == "PASCAL VOC 1.1":
self.assertTrue(zipfile.is_zipfile(content))
- elif format_name in ["YOLO 1.1", "YOLOv8 Detection 1.0", "YOLOv8 Segmentation 1.0", "YOLOv8 Oriented Bounding Boxes 1.0", "YOLOv8 Pose 1.0"]:
+ elif format_name in [
+ "YOLO 1.1", "Ultralytics YOLO Detection 1.0", "Ultralytics YOLO Segmentation 1.0",
+ "Ultralytics YOLO Oriented Bounding Boxes 1.0", "Ultralytics YOLO Pose 1.0",
+ ]:
self.assertTrue(zipfile.is_zipfile(content))
elif format_name in ['Kitti Raw Format 1.0','Sly Point Cloud Format 1.0']:
self.assertTrue(zipfile.is_zipfile(content))
diff --git a/cvat/requirements/base.in b/cvat/requirements/base.in
index fd86b51f99dc..043b7313ca7f 100644
--- a/cvat/requirements/base.in
+++ b/cvat/requirements/base.in
@@ -12,7 +12,7 @@ azure-storage-blob==12.13.0
boto3==1.17.61
clickhouse-connect==0.6.8
coreapi==2.3.3
-datumaro @ git+https://github.com/cvat-ai/datumaro.git@bf0374689df50599a34a4f220b9e5329aca695ce
+datumaro @ git+https://github.com/cvat-ai/datumaro.git@f55ccf92f7d61d7dad276dd0007fc4aeed72e0bb
dj-pagination==2.5.0
# Despite direct indication allauth in requirements we should keep 'with_social' for dj-rest-auth
# to avoid possible further versions conflicts (we use registration functionality)
diff --git a/cvat/requirements/base.txt b/cvat/requirements/base.txt
index fe4518b64e44..d08715602625 100644
--- a/cvat/requirements/base.txt
+++ b/cvat/requirements/base.txt
@@ -56,7 +56,7 @@ cryptography==44.0.0
# pyjwt
cycler==0.12.1
# via matplotlib
-datumaro @ git+https://github.com/cvat-ai/datumaro.git@bf0374689df50599a34a4f220b9e5329aca695ce
+datumaro @ git+https://github.com/cvat-ai/datumaro.git@f55ccf92f7d61d7dad276dd0007fc4aeed72e0bb
# via -r cvat/requirements/base.in
defusedxml==0.7.1
# via
diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml
index db9a8bbac01d..bed245a0b39a 100644
--- a/docker-compose.dev.yml
+++ b/docker-compose.dev.yml
@@ -29,6 +29,8 @@ services:
COVERAGE_PROCESS_START:
ports:
- '9090:9090'
+ volumes:
+ - ../datumaro/datumaro:/opt/venv/lib/python3.10/site-packages/datumaro
cvat_worker_export:
environment:
@@ -41,6 +43,8 @@ services:
COVERAGE_PROCESS_START:
ports:
- '9092:9092'
+ volumes:
+ - ../datumaro/datumaro:/opt/venv/lib/python3.10/site-packages/datumaro
cvat_worker_import:
environment:
@@ -53,6 +57,8 @@ services:
COVERAGE_PROCESS_START:
ports:
- '9093:9093'
+ volumes:
+ - ../datumaro/datumaro:/opt/venv/lib/python3.10/site-packages/datumaro
cvat_worker_quality_reports:
environment:
diff --git a/site/content/en/docs/manual/advanced/formats/_index.md b/site/content/en/docs/manual/advanced/formats/_index.md
index f4d30a45baa1..e8818e3742f6 100644
--- a/site/content/en/docs/manual/advanced/formats/_index.md
+++ b/site/content/en/docs/manual/advanced/formats/_index.md
@@ -23,34 +23,34 @@ The table below outlines the available formats for data export in CVAT.
-| Format | Type | Computer Vision Task | Models | Shapes | Attributes | Video Tracks |
-|------------------------------------------------------------------------------------------------------------------------------------|---------------|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------| -------------------- | ------------- |
-| [CamVid 1.0](format-camvid) | .txt&#10; .png | Semantic&#10; Segmentation | U-Net, SegNet, DeepLab,&#10; PSPNet, FCN, Mask R-CNN,&#10; ICNet, ERFNet, HRNet,&#10; V-Net, and others. | Polygons | Not supported | Not supported |
-| [Cityscapes 1.0](format-cityscapes) | .txt&#10; .png | Semantic&#10; Segmentation | U-Net, SegNet, DeepLab,&#10; PSPNet, FCN, ERFNet,&#10; ICNet, Mask R-CNN, HRNet,&#10; ENet, and others. | Polygons | Specific attributes | Not supported |
-| [COCO 1.0](format-coco) | JSON | Detection, Semantic&#10; Segmentation | YOLO (You Only Look Once),&#10; Faster R-CNN, Mask R-CNN, SSD (Single Shot MultiBox Detector),&#10; RetinaNet, EfficientDet, UNet,&#10; DeepLabv3+, CenterNet, Cascade R-CNN, and others. | Bounding Boxes, Polygons | Specific attributes | Not supported |
-| [COCO Keypoints 1.0](coco-keypoints) | .xml | Keypoints | OpenPose, PoseNet, AlphaPose,&#10; SPM (Single Person Model),&#10; Mask R-CNN with Keypoint Detection:, and others. | Skeletons | Specific attributes | Not supported |
-| {{< ilink "/docs/manual/advanced/formats/format-cvat#cvat-for-image-export" "CVAT for images 1.1" >}} | .xml | Any in 2D except for Video Tracking | Any model that can decode the format. | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks, Tags. | All attributes | Not supported |
-| {{< ilink "/docs/manual/advanced/formats/format-cvat#cvat-for-videos-export" "CVAT for video 1.1" >}} | .xml | Any in 2D except for Classification | Any model that can decode the format. | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks. | All attributes | Supported |
-| [Datumaro 1.0](format-datumaro) | JSON | Any | Any model that can decode the format.&#10; Main format in [Datumaro](https://github.com/openvinotoolkit/datumaro) framework | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks, Tags. | All attributes | Supported |
+| Format | Type | Computer Vision Task | Models | Shapes | Attributes | Video Tracks |
+|-----------------------------------------------------------------------------------------------------------------------------|---------------|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------| -------------------- | ------------- |
+| [CamVid 1.0](format-camvid) | .txt&#10; .png | Semantic&#10; Segmentation | U-Net, SegNet, DeepLab,&#10; PSPNet, FCN, Mask R-CNN,&#10; ICNet, ERFNet, HRNet,&#10; V-Net, and others. | Polygons | Not supported | Not supported |
+| [Cityscapes 1.0](format-cityscapes) | .txt&#10; .png | Semantic&#10; Segmentation | U-Net, SegNet, DeepLab,&#10; PSPNet, FCN, ERFNet,&#10; ICNet, Mask R-CNN, HRNet,&#10; ENet, and others. | Polygons | Specific attributes | Not supported |
+| [COCO 1.0](format-coco) | JSON | Detection, Semantic&#10; Segmentation | YOLO (You Only Look Once),&#10; Faster R-CNN, Mask R-CNN, SSD (Single Shot MultiBox Detector),&#10; RetinaNet, EfficientDet, UNet,&#10; DeepLabv3+, CenterNet, Cascade R-CNN, and others. | Bounding Boxes, Polygons | Specific attributes | Not supported |
+| [COCO Keypoints 1.0](coco-keypoints) | .xml | Keypoints | OpenPose, PoseNet, AlphaPose,&#10; SPM (Single Person Model),&#10; Mask R-CNN with Keypoint Detection:, and others. | Skeletons | Specific attributes | Not supported |
+| {{< ilink "/docs/manual/advanced/formats/format-cvat#cvat-for-image-export" "CVAT for images 1.1" >}} | .xml | Any in 2D except for Video Tracking | Any model that can decode the format. | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks, Tags. | All attributes | Not supported |
+| {{< ilink "/docs/manual/advanced/formats/format-cvat#cvat-for-videos-export" "CVAT for video 1.1" >}} | .xml | Any in 2D except for Classification | Any model that can decode the format. | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks. | All attributes | Supported |
+| [Datumaro 1.0](format-datumaro) | JSON | Any | Any model that can decode the format.&#10; Main format in [Datumaro](https://github.com/openvinotoolkit/datumaro) framework | Bounding Boxes, Polygons,&#10; Polylines, Points, Cuboids,&#10; Skeletons, Ellipses, Masks, Tags. | All attributes | Supported |
| [ICDAR](format-icdar)&#10; Includes ICDAR Recognition 1.0,&#10; ICDAR Detection 1.0,&#10; and ICDAR Segmentation 1.0&#10; descriptions. | .txt | Text recognition,&#10; Text detection,&#10; Text segmentation | EAST: Efficient and Accurate&#10; Scene Text Detector, CRNN, Mask TextSpotter, TextSnake,&#10; and others. | Tag, Bounding Boxes, Polygons | Specific attributes | Not supported |
-| [ImageNet 1.0](format-imagenet) | .jpg&#10; .txt | Semantic Segmentation,&#10; Classification,&#10; Detection | VGG (VGG16, VGG19), Inception, YOLO, Faster R-CNN , U-Net, and others | Tags | No attributes | Not supported |
-| [KITTI 1.0](format-kitti) | .txt&#10; .png | Semantic Segmentation, Detection, 3D | PointPillars, SECOND, AVOD, YOLO, DeepSORT, PWC-Net, ORB-SLAM, and others. | Bounding Boxes, Polygons | Specific attributes | Not supported |
-| [LabelMe 3.0](format-labelme) | .xml | Compatibility,&#10; Semantic Segmentation | U-Net, Mask R-CNN, Fast R-CNN,&#10; Faster R-CNN, DeepLab, YOLO,&#10; and others. | Bounding Boxes, Polygons | Supported (Polygons) | Not supported |
-| [LFW 1.0](format-lfw) | .txt | Verification,&#10; Face recognition | OpenFace, VGGFace & VGGFace2,&#10; FaceNet, ArcFace,&#10; and others. | Tags, Skeletons | Specific attributes | Not supported |
-| [Market-1501 1.0](format-market1501) | .txt | Re-identification | Triplet Loss Networks,&#10; Deep ReID models, and others. | Bounding Boxes | Specific attributes | Not supported |
-| [MOT 1.0](format-mot) | .txt | Video Tracking,&#10; Detection | SORT, MOT-Net, IOU Tracker,&#10; and others. | Bounding Boxes | Specific attributes | Supported |
-| [MOTS PNG 1.0](format-mots) | .png&#10; .txt | Video Tracking,&#10; Detection | SORT, MOT-Net, IOU Tracker,&#10; and others. | Bounding Boxes, Masks | Specific attributes | Supported |
-| [Open Images 1.0](format-openimages) | .csv | Detection,&#10; Classification,&#10; Semantic Segmentation | Faster R-CNN, YOLO, U-Net,&#10; CornerNet, and others. | Bounding Boxes, Tags, Polygons | Specific attributes | Not supported |
-| [PASCAL VOC 1.0](format-voc) | .xml | Classification, Detection | Faster R-CNN, SSD, YOLO,&#10; AlexNet, and others. | Bounding Boxes, Tags, Polygons | Specific attributes | Not supported |
-| [Segmentation Mask 1.0](format-smask) | .txt | Semantic Segmentation | Faster R-CNN, SSD, YOLO,&#10; AlexNet, and others. | Polygons | No attributes | Not supported |
-| [VGGFace2 1.0](format-vggface2) | .csv | Face recognition | VGGFace, ResNet, Inception,&#10; and others. | Bounding Boxes, Points | No attributes | Not supported |
-| [WIDER Face 1.0](format-widerface) | .txt | Detection | SSD (Single Shot MultiBox Detector), Faster R-CNN, YOLO,&#10; and others. | Bounding Boxes, Tags | Specific attributes | Not supported |
-| [YOLO 1.0](format-yolo) | .txt | Detection | YOLOv1, YOLOv2 (YOLO9000),&#10; YOLOv3, YOLOv4, and others. | Bounding Boxes | No attributes | Not supported |
-| [YOLOv8 Detection 1.0](format-yolov8) | .txt | Detection | YOLOv8 | Bounding Boxes | No attributes | Not supported |
-| [YOLOv8 Segmentation 1.0](format-yolov8) | .txt | Instance Segmentation | YOLOv8 | Polygons, Masks | No attributes | Not supported |
-| [YOLOv8 Pose 1.0](format-yolov8) | .txt | Keypoints | YOLOv8 | Skeletons | No attributes | Not supported |
-| [YOLOv8 Oriented Bounding Boxes 1.0](format-yolov8) | .txt | Detection | YOLOv8 | Bounding Boxes | No attributes | Not supported |
-| [YOLOv8 Classification 1.0](format-yolov8-classification) | .jpg | Classification | YOLOv8 | Tags | No attributes | Not supported |
+| [ImageNet 1.0](format-imagenet) | .jpg&#10; .txt | Semantic Segmentation,&#10; Classification,&#10; Detection | VGG (VGG16, VGG19), Inception, YOLO, Faster R-CNN , U-Net, and others | Tags | No attributes | Not supported |
+| [KITTI 1.0](format-kitti) | .txt&#10; .png | Semantic Segmentation, Detection, 3D | PointPillars, SECOND, AVOD, YOLO, DeepSORT, PWC-Net, ORB-SLAM, and others. | Bounding Boxes, Polygons | Specific attributes | Not supported |
+| [LabelMe 3.0](format-labelme) | .xml | Compatibility,&#10; Semantic Segmentation | U-Net, Mask R-CNN, Fast R-CNN,&#10; Faster R-CNN, DeepLab, YOLO,&#10; and others. | Bounding Boxes, Polygons | Supported (Polygons) | Not supported |
+| [LFW 1.0](format-lfw) | .txt | Verification,&#10; Face recognition | OpenFace, VGGFace & VGGFace2,&#10; FaceNet, ArcFace,&#10; and others. | Tags, Skeletons | Specific attributes | Not supported |
+| [Market-1501 1.0](format-market1501) | .txt | Re-identification | Triplet Loss Networks,&#10; Deep ReID models, and others. | Bounding Boxes | Specific attributes | Not supported |
+| [MOT 1.0](format-mot) | .txt | Video Tracking,&#10; Detection | SORT, MOT-Net, IOU Tracker,&#10; and others. | Bounding Boxes | Specific attributes | Supported |
+| [MOTS PNG 1.0](format-mots) | .png&#10; .txt | Video Tracking,&#10; Detection | SORT, MOT-Net, IOU Tracker,&#10; and others. | Bounding Boxes, Masks | Specific attributes | Supported |
+| [Open Images 1.0](format-openimages) | .csv | Detection,&#10; Classification,&#10; Semantic Segmentation | Faster R-CNN, YOLO, U-Net,&#10; CornerNet, and others. | Bounding Boxes, Tags, Polygons | Specific attributes | Not supported |
+| [PASCAL VOC 1.0](format-voc) | .xml | Classification, Detection | Faster R-CNN, SSD, YOLO,&#10; AlexNet, and others. | Bounding Boxes, Tags, Polygons | Specific attributes | Not supported |
+| [Segmentation Mask 1.0](format-smask) | .txt | Semantic Segmentation | Faster R-CNN, SSD, YOLO,&#10; AlexNet, and others. | Polygons | No attributes | Not supported |
+| [VGGFace2 1.0](format-vggface2) | .csv | Face recognition | VGGFace, ResNet, Inception,&#10; and others. | Bounding Boxes, Points | No attributes | Not supported |
+| [WIDER Face 1.0](format-widerface) | .txt | Detection | SSD (Single Shot MultiBox Detector), Faster R-CNN, YOLO,&#10; and others. | Bounding Boxes, Tags | Specific attributes | Not supported |
+| [YOLO 1.0](format-yolo) | .txt | Detection | YOLOv1, YOLOv2 (YOLO9000),&#10; YOLOv3, YOLOv4, and others. | Bounding Boxes | No attributes | Not supported |
+| [Ultralytics YOLO Detection 1.0](format-yolo-ultralytics) | .txt | Detection | YOLOv8 | Bounding Boxes | No attributes | Not supported |
+| [Ultralytics YOLO Segmentation 1.0](format-yolo-ultralytics) | .txt | Instance Segmentation | YOLOv8 | Polygons, Masks | No attributes | Not supported |
+| [Ultralytics YOLO Pose 1.0](format-yolo-ultralytics) | .txt | Keypoints | YOLOv8 | Skeletons | No attributes | Not supported |
+| [Ultralytics YOLO Oriented Bounding Boxes 1.0](format-yolo-ultralytics) | .txt | Detection | YOLOv8 | Bounding Boxes | No attributes | Not supported |
+| [Ultralytics YOLO Classification 1.0](format-yolo-ultralytics-classification) | .jpg | Classification | YOLOv8 | Tags | No attributes | Not supported |
diff --git a/site/content/en/docs/manual/advanced/formats/format-yolov8-classification.md b/site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics-classification.md
similarity index 78%
rename from site/content/en/docs/manual/advanced/formats/format-yolov8-classification.md
rename to site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics-classification.md
index 8857c11518b3..734fd229a052 100644
--- a/site/content/en/docs/manual/advanced/formats/format-yolov8-classification.md
+++ b/site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics-classification.md
@@ -1,16 +1,16 @@
---
-title: 'YOLOv8-Classification'
-linkTitle: 'YOLOv8-Classification'
+title: 'Ultralytics-YOLO-Classification'
+linkTitle: 'Ultralytics-YOLO-Classification'
weight: 7
-description: 'How to export and import data in YOLOv8 Classification format'
+description: 'How to export and import data in Ultralytics YOLO Classification format'
---
For more information, see:
- [Format specification](https://docs.ultralytics.com/datasets/classify/)
-- [Dataset examples](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolov8_classification)
+- [Dataset examples](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolo_ultralytics_classification)
-## YOLOv8 Classification export
+## Ultralytics YOLO Classification export
For export of images:
diff --git a/site/content/en/docs/manual/advanced/formats/format-yolov8.md b/site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics.md
similarity index 80%
rename from site/content/en/docs/manual/advanced/formats/format-yolov8.md
rename to site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics.md
index 4d2975900ab8..baccd8e45f51 100644
--- a/site/content/en/docs/manual/advanced/formats/format-yolov8.md
+++ b/site/content/en/docs/manual/advanced/formats/format-yolo-ultralytics.md
@@ -1,24 +1,24 @@
---
-title: 'YOLOv8'
-linkTitle: 'YOLOv8'
+title: 'Ultralytics YOLO'
+linkTitle: 'Ultralytics YOLO'
weight: 7
-description: 'How to export and import data in YOLOv8 formats'
+description: 'How to export and import data in Ultralytics YOLO formats'
---
-YOLOv8 is a format family which consists of four formats:
+Ultralytics YOLO is a format family which consists of four formats:
- [Detection](https://docs.ultralytics.com/datasets/detect/)
- [Oriented bounding Box](https://docs.ultralytics.com/datasets/obb/)
- [Segmentation](https://docs.ultralytics.com/datasets/segment/)
- [Pose](https://docs.ultralytics.com/datasets/pose/)
Dataset examples:
-- [Detection](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolov8_detection)
-- [Oriented Bounding Boxes](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolov8_oriented_boxes)
-- [Segmentation](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolov8_segmentation)
-- [Pose](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolov8_pose)
+- [Detection](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolo_ultralytics_detection)
+- [Oriented Bounding Boxes](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolo_ultralytics_oriented_boxes)
+- [Segmentation](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolo_ultralytics_segmentation)
+- [Pose](https://github.com/cvat-ai/datumaro/tree/develop/tests/assets/yolo_dataset/yolo_ultralytics_pose)
-## YOLOv8 export
+## Ultralytics YOLO export
For export of images:
@@ -59,7 +59,7 @@ images//image2.jpg
path: ./ # dataset root dir
train: train.txt # train images (relative to 'path')
-# YOLOv8 Pose specific field
+# Ultralytics YOLO Pose specific field
# First number is the number of points in a skeleton.
# If there are several skeletons with different number of points, it is the greatest number of points
# Second number defines the format of point info in annotation txt files
@@ -75,7 +75,7 @@ names:
# .txt:
# content depends on format
-# YOLOv8 Detection:
+# Ultralytics YOLO Detection:
# label_id - id from names field of data.yaml
# cx, cy - relative coordinates of the bbox center
# rw, rh - relative size of the bbox
@@ -83,19 +83,19 @@ names:
1 0.3 0.8 0.1 0.3
2 0.7 0.2 0.3 0.1
-# YOLOv8 Oriented Bounding Boxes:
+# Ultralytics YOLO Oriented Bounding Boxes:
# xn, yn - relative coordinates of the n-th point
# label_id x1 y1 x2 y2 x3 y3 x4 y4
1 0.3 0.8 0.1 0.3 0.4 0.5 0.7 0.5
2 0.7 0.2 0.3 0.1 0.4 0.5 0.5 0.6
-# YOLOv8 Segmentation:
+# Ultralytics YOLO Segmentation:
# xn, yn - relative coordinates of the n-th point
# label_id x1 y1 x2 y2 x3 y3 ...
1 0.3 0.8 0.1 0.3 0.4 0.5
2 0.7 0.2 0.3 0.1 0.4 0.5 0.5 0.6 0.7 0.5
-# YOLOv8 Pose:
+# Ultralytics YOLO Pose:
# cx, cy - relative coordinates of the bbox center
# rw, rh - relative size of the bbox
# xn, yn - relative coordinates of the n-th point
@@ -126,3 +126,14 @@ is named to correspond with its associated image file.
For example, `frame_000001.txt` serves as the annotation for the
`frame_000001.jpg` image.
+
+## Import annotations in Ultralytics YOLO formats
+
+Uploaded file: a zip archive with the structure described above, for example:
+
+```bash
+archive.zip/
+├── data.yaml    # configuration file
+├── train.txt    # list of train subset image paths
+├── images/
+│   └── train/
+│       └── image_1.jpg
+└── labels/
+    └── train/
+        └── image_1.txt
+```
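+
+Annotations can also be uploaded programmatically using the same display names.
+A minimal sketch, assuming the high-level `cvat-sdk` client API
+(the host, port, credentials, and task id below are placeholders):
+
+```python
+from cvat_sdk import make_client
+
+with make_client("localhost", port=8080, credentials=("user", "password")) as client:
+    task = client.tasks.retrieve(42)
+    # The format is selected by its display name, as listed on this page
+    task.import_annotations("Ultralytics YOLO Detection 1.0", "ultralytics_yolo_detection.zip")
+```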
diff --git a/tests/python/rest_api/test_projects.py b/tests/python/rest_api/test_projects.py
index d3d807d68088..7785454c8839 100644
--- a/tests/python/rest_api/test_projects.py
+++ b/tests/python/rest_api/test_projects.py
@@ -714,7 +714,7 @@ def test_can_import_dataset_in_org(self, admin_user: str):
("CVAT for images 1.1", "CVAT 1.1"),
("CVAT for video 1.1", "CVAT 1.1"),
("Datumaro 1.0", "Datumaro 1.0"),
- ("YOLOv8 Pose 1.0", "YOLOv8 Pose 1.0"),
+ ("Ultralytics YOLO Pose 1.0", "Ultralytics YOLO Pose 1.0"),
),
)
def test_can_export_and_import_dataset_with_skeletons(
@@ -1078,10 +1078,10 @@ def _export_task(task_id: int, format_name: str) -> io.BytesIO:
("LFW 1.0", "{subset}/images/"),
("Cityscapes 1.0", "imgsFine/leftImg8bit/{subset}/"),
("Open Images V6 1.0", "images/{subset}/"),
- ("YOLOv8 Detection 1.0", "images/{subset}/"),
- ("YOLOv8 Oriented Bounding Boxes 1.0", "images/{subset}/"),
- ("YOLOv8 Segmentation 1.0", "images/{subset}/"),
- ("YOLOv8 Pose 1.0", "images/{subset}/"),
+ ("Ultralytics YOLO Detection 1.0", "images/{subset}/"),
+ ("Ultralytics YOLO Oriented Bounding Boxes 1.0", "images/{subset}/"),
+ ("Ultralytics YOLO Segmentation 1.0", "images/{subset}/"),
+ ("Ultralytics YOLO Pose 1.0", "images/{subset}/"),
],
)
@pytest.mark.parametrize("api_version", (1, 2))
diff --git a/tests/python/rest_api/test_tasks.py b/tests/python/rest_api/test_tasks.py
index 15496cc31f73..70d8a84827bb 100644
--- a/tests/python/rest_api/test_tasks.py
+++ b/tests/python/rest_api/test_tasks.py
@@ -947,7 +947,7 @@ def test_export_dataset_after_deleting_related_cloud_storage(
[
("Datumaro 1.0", "", "images/{subset}"),
("YOLO 1.1", "train", "obj_{subset}_data"),
- ("YOLOv8 Detection 1.0", "train", "images/{subset}"),
+ ("Ultralytics YOLO Detection 1.0", "train", "images/{subset}"),
],
)
@pytest.mark.parametrize("api_version", (1, 2))
@@ -5422,10 +5422,10 @@ def test_can_import_datumaro_json(self, admin_user, tasks, dimension):
"Open Images V6 1.0",
"Datumaro 1.0",
"Datumaro 3D 1.0",
- "YOLOv8 Oriented Bounding Boxes 1.0",
- "YOLOv8 Detection 1.0",
- "YOLOv8 Pose 1.0",
- "YOLOv8 Segmentation 1.0",
+ "Ultralytics YOLO Oriented Bounding Boxes 1.0",
+ "Ultralytics YOLO Detection 1.0",
+ "Ultralytics YOLO Pose 1.0",
+ "Ultralytics YOLO Segmentation 1.0",
],
)
def test_check_import_error_on_wrong_file_structure(self, tasks_with_shapes, format_name):