diff --git a/db.sqlite3 b/db.sqlite3
index f37ee0ba0..ec9bb6fcb 100644
Binary files a/db.sqlite3 and b/db.sqlite3 differ
diff --git a/exercises/static/exercises/dl_digit_classifier/exercise.py b/exercises/static/exercises/dl_digit_classifier/exercise.py
index c6db07f43..21af86563 100644
--- a/exercises/static/exercises/dl_digit_classifier/exercise.py
+++ b/exercises/static/exercises/dl_digit_classifier/exercise.py
@@ -13,6 +13,8 @@
 import cv2
 import numpy as np
 import onnxruntime
+import onnx
+from onnxruntime.quantization import quantize_dynamic, QuantType
from websocket_server import WebsocketServer
from gui import GUI, ThreadGUI
@@ -60,16 +62,48 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
raw_dl_model_bytes = raw_dl_model.encode('ascii')
raw_dl_model_bytes = base64.b64decode(raw_dl_model_bytes)
- # Load ONNX model
+ # Load and optimize ONNX model
ort_session = None
try:
with open(self.aux_model_fname, "wb") as f:
f.write(raw_dl_model_bytes)
- ort_session = onnxruntime.InferenceSession(self.aux_model_fname)
+
+ # Load the original model
+ model = onnx.load(self.aux_model_fname)
+
+ # Apply optimizations directly using ONNX Runtime
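+            # NOTE: onnx.optimizer assumes onnx < 1.9; later releases moved
+            # these passes to the separate "onnxoptimizer" package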
+ model_optimized = onnx.optimizer.optimize(model, passes=[
+ "eliminate_identity",
+ "eliminate_deadend",
+ "eliminate_nop_dropout",
+ "eliminate_nop_transpose",
+ "fuse_bn_into_conv",
+ "fuse_consecutive_transposes",
+ "fuse_pad_into_conv",
+ "fuse_transpose_into_gemm",
+ "lift_lexical_references",
+ "nop_elimination",
+ "split_init"
+ ])
+
+ # Save the optimized model
+ optimized_model_fname = "optimized_model.onnx"
+ onnx.save(model_optimized, optimized_model_fname)
+
+ # Quantize the model
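+            # (dynamic quantization stores the weights as int8 and dequantizes
+            # them on the fly, shrinking the model and speeding up CPU inference)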
+ quantized_model_fname = "quantized_model.onnx"
+ quantize_dynamic(optimized_model_fname, quantized_model_fname, weight_type=QuantType.QInt8)
+
+ # Load the quantized model
+ ort_session = onnxruntime.InferenceSession(quantized_model_fname)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
print(str(exc_value))
- print("ERROR: Model couldn't be loaded")
+ print("ERROR: Model couldn't be loaded or optimized")
try:
# Init auxiliar variables used for stabilized predictions
@@ -102,10 +136,10 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
pred = int(np.argmax(output, axis=1)) # get the index of the max log-probability
end = time.time()
- frame_time = round(end-start, 3)
- fps = 1.0/frame_time
+ frame_time = round(end - start, 3)
+ fps = 1.0 / frame_time
-                    # number of consecutive frames that must be reached to consider a validprediction
+                    # number of consecutive frames that must be reached to consider a valid prediction
- n_consecutive_frames = int(fps/2)
+ n_consecutive_frames = int(fps / 2)
# For stability, only show digit if detected in more than n consecutive frames
if pred != previous_established_pred:
@@ -122,9 +156,9 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
previous_pred = pred
# Show region used as ROI
- cv2.rectangle(input_image,pt2=(w_border, h_border),pt1=(w_border + w_roi, h_border + h_roi),color=(255, 0, 0),thickness=3)
+ cv2.rectangle(input_image, pt2=(w_border, h_border), pt1=(w_border + w_roi, h_border + h_roi), color=(255, 0, 0), thickness=3)
# Show FPS count
- cv2.putText(input_image, "FPS: {}".format(int(fps)), (7,25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)
+ cv2.putText(input_image, "FPS: {}".format(int(fps)), (7, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
# Send result
self.gui.showResult(input_image, str(previous_established_pred))
@@ -136,7 +170,7 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
self.iteration_counter += 1
- # The code should be run for atleast the target time step
+ # The code should be run for at least the target time step
# If it's less put to sleep
if (ms < self.ideal_cycle):
time.sleep((self.ideal_cycle - ms) / 1000.0)
@@ -149,6 +183,7 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
exc_type, exc_value, exc_traceback = sys.exc_info()
print(str(exc_value))
+
# Function to measure the frequency of iterations
def measure_frequency(self):
previous_time = datetime.now()
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/entry_point/exercise.py b/exercises/static/exercises/dl_digit_classifier_newmanager/entry_point/exercise.py
new file mode 100644
index 000000000..ebcedb2b9
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/entry_point/exercise.py
@@ -0,0 +1,13 @@
+import os.path
+from typing import Callable
+
+from src.manager.libs.applications.compatibility.exercise_wrapper_ros2 import CompatibilityExerciseWrapperRos2
+
+
+class Exercise(CompatibilityExerciseWrapperRos2):
+ def __init__(self, circuit: str, update_callback: Callable):
+ current_path = os.path.dirname(__file__)
+
+ super(Exercise, self).__init__(exercise_command=f"{current_path}/../../python_template/ros2_humble/exercise.py 0.0.0.0",
+ gui_command=f"{current_path}/../../python_template/ros2_humble/gui.py 0.0.0.0 {circuit}",
+ update_callback=update_callback)
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/launch/ros2_humble/digit_classifier.launch.py b/exercises/static/exercises/dl_digit_classifier_newmanager/launch/ros2_humble/digit_classifier.launch.py
new file mode 100644
index 000000000..49cb452db
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/launch/ros2_humble/digit_classifier.launch.py
@@ -0,0 +1,17 @@
+import launch
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+def generate_launch_description():
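+    # v4l2_camera grabs frames from /dev/video0 and publishes them on /image_raw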
+ return LaunchDescription([
+ Node(
+ package='v4l2_camera',
+ executable='v4l2_camera_node',
+ name='v4l2_camera_node',
+ parameters=[
+ {'video_device': '/dev/video0'}
+ ],
+ ),
+ ])
+
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/GUI.py b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/GUI.py
new file mode 100755
index 000000000..3ec81a280
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/GUI.py
@@ -0,0 +1,196 @@
+import json
+import os
+import rclpy
+import cv2
+import sys
+import base64
+import threading
+import time
+import numpy as np
+from datetime import datetime
+import websocket
+import subprocess
+import logging
+
+from hal_interfaces.general.odometry import OdometryNode
+from console import start_console
+
+
+# Graphical User Interface Class
+class GUI:
+ # Initialization function
+ # The actual initialization
+ def __init__(self, host):
+
+ self.payload = {'image': ''}
+
+ # ROS2 init
+ if not rclpy.ok():
+ rclpy.init(args=None)
+
+
+ # Image variables
+ self.image_to_be_shown = None
+ self.digit_to_be_shown = None
+ self.image_to_be_shown_updated = False
+ self.image_show_lock = threading.Lock()
+ self.host = host
+ self.client = None
+
+
+
+ self.ack = False
+ self.ack_lock = threading.Lock()
+
+ # Create the lap object
+ # TODO: maybe move this to HAL and have it be hybrid
+
+
+ self.client_thread = threading.Thread(target=self.run_websocket)
+ self.client_thread.start()
+
+ def run_websocket(self):
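+        # run_forever() blocks until the connection drops, so this loop
+        # reconnects automatically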
+ while True:
+ print("GUI WEBSOCKET CONNECTED")
+ self.client = websocket.WebSocketApp(self.host, on_message=self.on_message)
+ self.client.run_forever(ping_timeout=None, ping_interval=0)
+
+ # Function to prepare image payload
+ # Encodes the image as a JSON string and sends through the WS
+ def payloadImage(self):
+ with self.image_show_lock:
+ image_to_be_shown_updated = self.image_to_be_shown_updated
+ image_to_be_shown = self.image_to_be_shown
+
+ image = image_to_be_shown
+ payload = {'image': '', 'shape': ''}
+
+ if not image_to_be_shown_updated:
+ return payload
+
+ shape = image.shape
+ frame = cv2.imencode('.JPEG', image)[1]
+ encoded_image = base64.b64encode(frame)
+
+ payload['image'] = encoded_image.decode('utf-8')
+ payload['shape'] = shape
+
+ with self.image_show_lock:
+ self.image_to_be_shown_updated = False
+
+ return payload
+
+ # Function for student to call
+ def showImage(self, image):
+ with self.image_show_lock:
+ self.image_to_be_shown = image
+ self.image_to_be_shown_updated = True
+
+ # Update the gui
+ def update_gui(self):
+ # print("GUI update")
+ # Payload Image Message
+ payload = self.payloadImage()
+ self.payload["image"] = json.dumps(payload)
+
+
+ message = json.dumps(self.payload)
+ if self.client:
+ try:
+ self.client.send(message)
+ # print(message)
+ except Exception as e:
+ print(f"Error sending message: {e}")
+
+ def on_message(self, ws, message):
+ """Handles incoming messages from the websocket client."""
+ if message.startswith("#ack"):
+ # print("on message" + str(message))
+ self.set_acknowledge(True)
+
+ def get_acknowledge(self):
+ """Gets the acknowledge status."""
+ with self.ack_lock:
+ ack = self.ack
+
+ return ack
+
+ def set_acknowledge(self, value):
+ """Sets the acknowledge status."""
+ with self.ack_lock:
+ self.ack = value
+
+
+class ThreadGUI:
+ """Class to manage GUI updates and frequency measurements in separate threads."""
+
+ def __init__(self, gui):
+ """Initializes the ThreadGUI with a reference to the GUI instance."""
+ self.gui = gui
+ self.ideal_cycle = 80
+ self.real_time_factor = 0
+ self.frequency_message = {'brain': '', 'gui': ''}
+ self.iteration_counter = 0
+ self.running = True
+
+ def start(self):
+ """Starts the GUI, frequency measurement, and real-time factor threads."""
+ self.frequency_thread = threading.Thread(target=self.measure_and_send_frequency)
+ self.gui_thread = threading.Thread(target=self.run)
+ self.frequency_thread.start()
+ self.gui_thread.start()
+ print("GUI Thread Started!")
+
+ def measure_and_send_frequency(self):
+ """Measures and sends the frequency of GUI updates and brain cycles."""
+ previous_time = datetime.now()
+ while self.running:
+ time.sleep(2)
+
+ current_time = datetime.now()
+ dt = current_time - previous_time
+ ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
+ previous_time = current_time
+ measured_cycle = ms / self.iteration_counter if self.iteration_counter > 0 else 0
+ self.iteration_counter = 0
+ brain_frequency = round(1000 / measured_cycle, 1) if measured_cycle != 0 else 0
+ gui_frequency = round(1000 / self.ideal_cycle, 1)
+ self.frequency_message = {'brain': brain_frequency, 'gui': gui_frequency}
+ message = json.dumps(self.frequency_message)
+ if self.gui.client:
+ try:
+ self.gui.client.send(message)
+ except Exception as e:
+ print(f"Error sending frequency message: {e}")
+
+ def run(self):
+ """Main loop to update the GUI at regular intervals."""
+ while self.running:
+ start_time = datetime.now()
+
+ self.gui.update_gui()
+ self.iteration_counter += 1
+ finish_time = datetime.now()
+
+ dt = finish_time - start_time
+ ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
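+            # Pace the loop at roughly 20 Hz (one GUI update every 50 ms)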
+ sleep_time = max(0, (50 - ms) / 1000.0)
+ time.sleep(sleep_time)
+
+
+# Create a GUI interface
+host = "ws://127.0.0.1:2303"
+gui_interface = GUI(host)
+
+# Spin a thread to keep the interface updated
+thread_gui = ThreadGUI(gui_interface)
+thread_gui.start()
+
+# Redirect the console
+start_console()
+
+def showImage(image):
+ gui_interface.showImage(image)
+
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/HAL.py b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/HAL.py
new file mode 100755
index 000000000..b3cda0c1b
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/HAL.py
@@ -0,0 +1,44 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import threading
+import cv2
+
+current_frame = None # Global variable to store the frame
+
+class WebcamSubscriber(Node):
+ def __init__(self):
+ super().__init__('webcam_subscriber')
+ self.subscription = self.create_subscription(
+ Image,
+ '/image_raw',
+ self.listener_callback,
+ 10)
+ self.subscription # prevent unused variable warning
+ self.bridge = CvBridge()
+
+ def listener_callback(self, msg):
+ global current_frame
+ self.get_logger().info('Receiving video frame')
+ current_frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
+
+def run_webcam_node():
+    # Ensure the ROS2 context exists before creating the node
+    if not rclpy.ok():
+        rclpy.init(args=None)
+    webcam_subscriber = WebcamSubscriber()
+
+    rclpy.spin(webcam_subscriber)
+    webcam_subscriber.destroy_node()
+
+
+# Start the ROS2 node in a separate thread
+thread = threading.Thread(target=run_webcam_node)
+thread.start()
+
+def getImage():
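+    # Returns the latest webcam frame, or None until the first frame arrives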
+ global current_frame
+ return current_frame
+
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/README.md b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/README.md
new file mode 100644
index 000000000..68bb5efc8
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/README.md
@@ -0,0 +1 @@
+[Exercise Documentation Website](https://jderobot.github.io/RoboticsAcademy/exercises/ComputerVision/dl_digit_classifier)
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/console.py b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/console.py
new file mode 100755
index 000000000..5335016ae
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/python_template/ros2_humble/console.py
@@ -0,0 +1,18 @@
+# Functions to start and close console
+import os
+import sys
+
+def start_console():
+    # List the pseudo-terminals and pick the second-to-last one
+    fds = os.listdir("/dev/pts/")
+    fds.sort()
+    console_fd = fds[-2]
+
+    sys.stderr = open('/dev/pts/' + console_fd, 'w')
+    sys.stdout = open('/dev/pts/' + console_fd, 'w')
+    sys.stdin = open('/dev/pts/' + console_fd, 'r')
+
+def close_console():
+ sys.stderr.close()
+ sys.stdout.close()
+ sys.stdin.close()
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/DigClassMain.js b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/DigClassMain.js
new file mode 100644
index 000000000..dcc0c28f0
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/DigClassMain.js
@@ -0,0 +1,14 @@
+import * as React from "react";
+import {Fragment} from "react";
+
+import "./css/DigClassMain.css";
+
+const DigClassMain = (props) => {
+  return (
+    <Fragment>
+      {props.children}
+    </Fragment>
+  );
+};
+
+export default DigClassMain;
\ No newline at end of file
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/MapSelectorFollow.js b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/MapSelectorFollow.js
new file mode 100644
index 000000000..2faaeaca6
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/MapSelectorFollow.js
@@ -0,0 +1,164 @@
+import React, { useEffect, useState } from "react";
+import MenuItem from "@mui/material/MenuItem";
+import { FormControl, InputLabel, Select, Box } from "@mui/material";
+
+const exerciseConfig = JSON.parse(
+ document.getElementById("exercise-config").textContent
+);
+const exerciseId = exerciseConfig.exercise_id;
+var ros_version = 1;
+
+export default function MapSelectorFollow(props) {
+ const changeConfig = (circuitPath) => {
+ const config = JSON.parse(
+ document.getElementById("exercise-config").textContent
+ );
+ config.application.params = { circuit: circuitPath };
+ config.launch_file = `$EXERCISE_FOLDER/launch/digit_classifier.launch`;
+ return config;
+ };
+
+ const handleCircuitChange = (e) => {
+ context.mapSelected = e.name;
+ setSelectedCircuit(e);
+ let full_config = JSON.parse(
+ document.getElementById("exercise-config").textContent
+ );
+ let config = full_config[`ROS${ros_version}`][0];
+ config.application.params = { circuit: e.name };
+ config.launch_file = e.path;
+ config['exercise_id'] = exerciseId;
+ config["world"] = "physical";
+ config["visualization"] = "physical_rae";
+    if (ros_version == 2) {
+      config["resource_folders"] = "$EXERCISE_FOLDER/launch/ros2_humble";
+    }
+
+ config.height = window.innerHeight / 2;
+ config.width = window.innerWidth / 2;
+ window.RoboticsExerciseComponents.commsManager.terminate().then(() => {
+ window.RoboticsReactComponents.MessageSystem.Loading.showLoading(
+ "Launching World in Robotics Backend"
+ );
+ window.RoboticsExerciseComponents.commsManager.launch(config).then(()=> {
+ RoboticsReactComponents.MessageSystem.Loading.hideLoading();
+ }).catch((e) => {
+ RoboticsReactComponents.MessageSystem.Loading.showFailLoading(
+ `Error launching the world:${e.data.message}. Try changing the world or reloading the page`
+ );
+ });
+ });
+ };
+
+ const [disabled, setDisabled] = useState(true);
+ const [circuitOptions, setCircuitOptions] = useState([]);
+ const [selectedCircuit, setSelectedCircuit] = useState("");
+
+
+ useEffect(() => {
+ const callback = (message) => {
+ if (message.data.state !== "connected") {
+ setDisabled(false);
+ } else {
+ setDisabled(true);
+ }
+ };
+ window.RoboticsExerciseComponents.commsManager.subscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.STATE_CHANGED],
+ callback
+ );
+
+ return () => {
+ window.RoboticsExerciseComponents.commsManager.unsubscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.STATE_CHANGED],
+ callback
+ );
+ };
+ }, []);
+
+ useEffect(() => {
+ const serverBase = `${document.location.protocol}//${document.location.hostname}:7164`;
+ let requestUrl = `${serverBase}/exercises/exercise/${exerciseId}/launch_files`;
+ const request = new Request(requestUrl, {
+ method: "POST",
+ headers: {
+ "Content-type": "application/json",
+ 'X-CSRFToken': context.csrf
+ },
+ })
+ fetch(request)
+ .then((response) => response.json())
+ .then((data) => {
+ const rosVersionURL = `${serverBase}/exercises/ros_version/`;
+ ros_version = 1;
+ fetch(rosVersionURL)
+ .then((res) => res.json())
+ .then((msg) => {
+ ros_version = msg.version;
+
+ if (isNaN(parseInt(ros_version))) {
+ ros_version = 1;
+ }
+ const config = data;
+ // Selects the configs available for the ROS version installed
+ const availableConfigs = {};
+ availableConfigs[`ROS${ros_version}`] = config[`ROS${ros_version}`];
+ console.log(availableConfigs);
+ setSelectedCircuit(availableConfigs[`ROS${ros_version}`][0]);
+ setCircuitOptions(availableConfigs[`ROS${ros_version}`])
+ context.mapSelected =
+ availableConfigs[`ROS${ros_version}`][0].name;
+ })
+ setCircuitOptions(data.launch);
+
+ })
+ .catch((error) => {
+ console.log("Error fetching circuit options:", error);
+ });
+ }, []);
+
+
+
+  return (
+    <Box>
+      <FormControl fullWidth>
+        <InputLabel id="world-selector-label">World</InputLabel>
+        <Select
+          labelId="world-selector-label"
+          label="World"
+          disabled={disabled}
+          value={selectedCircuit}
+          onChange={(e) => handleCircuitChange(e.target.value)}
+        >
+          {circuitOptions.map((option) => (
+            <MenuItem key={option.name} value={option}>
+              {option.name}
+            </MenuItem>
+          ))}
+        </Select>
+      </FormControl>
+    </Box>
+  );
+}
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/SpecificFollowLine.js b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/SpecificFollowLine.js
new file mode 100644
index 000000000..84c610fee
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/SpecificFollowLine.js
@@ -0,0 +1,53 @@
+import * as React from "react";
+import { Box } from "@mui/material";
+import "./css/GUICanvas.css";
+import { drawImage } from "./helpers/showImagesFollowLine";
+
+
+const SpecificFollowLine = (props) => {
+ const [image, setImage] = React.useState(null)
+ const canvasRef = React.useRef(null)
+
+ React.useEffect(() => {
+ console.log("TestShowScreen subscribing to ['update'] events");
+ const callback = (message) => {
+ if(message.data.update.image){
+ console.log('image')
+ const image = JSON.parse(message.data.update.image)
+ if(image.image){
+ drawImage(message.data.update)
+ }
+ }
+ };
+
+ window.RoboticsExerciseComponents.commsManager.subscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.UPDATE],
+ callback
+ );
+
+ return () => {
+ console.log("TestShowScreen unsubscribing from ['state-changed'] events");
+ window.RoboticsExerciseComponents.commsManager.unsubscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.UPDATE],
+ callback
+ );
+ };
+ }, []);
+
+  return (
+    <Box sx={{ width: props.width, height: props.height }}>
+      <canvas id="canvas" className="exercise-canvas" ref={canvasRef} />
+    </Box>
+  );
+};
+
+SpecificFollowLine.defaultProps = {
+ width: 800,
+ height: 600,
+};
+
+export default SpecificFollowLine
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/DigClassMain.css b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/DigClassMain.css
new file mode 100644
index 000000000..3bdee7925
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/DigClassMain.css
@@ -0,0 +1,28 @@
+* {
+ box-sizing: border-box;
+}
+
+body, html {
+ width: 100%; height: 100%;
+}
+
+#exercise-container {
+ position: absolute;
+ top: 0; left: 0; bottom: 0; right: 0;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+#exercise-container #content {
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+}
+
+#exercise-container #content #content-exercise {
+ display: flex;
+ flex-direction: column;
+ width: 100%;
+ height: 100%;
+}
\ No newline at end of file
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/GUICanvas.css b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/GUICanvas.css
new file mode 100644
index 000000000..10997de2b
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/css/GUICanvas.css
@@ -0,0 +1,6 @@
+.exercise-canvas {
+ width: 100%;
+ height: 100%;
+ max-height: 100%;
+ background-color: #303030;
+}
diff --git a/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/helpers/showImagesFollowLine.js b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/helpers/showImagesFollowLine.js
new file mode 100644
index 000000000..26737e626
--- /dev/null
+++ b/exercises/static/exercises/dl_digit_classifier_newmanager/react-components/helpers/showImagesFollowLine.js
@@ -0,0 +1,34 @@
+// To decode the image string we will receive from server
+function decode_utf8(s){
+ return decodeURIComponent(escape(s))
+}
+
+let image = new Image();
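+// A single Image instance is reused so each new frame replaces the previous one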
+
+export function drawImage (data){
+ var canvas = document.getElementById("canvas"),
+ context = canvas.getContext('2d')
+
+ // For image object
+ image.onload = function(){
+ update_image();
+ }
+
+ // Request Animation Frame to remove the flickers
+ function update_image(){
+ window.requestAnimationFrame(update_image);
+ context.drawImage(image, 0, 0);
+ }
+
+ // Parse the Image Data
+ var image_data = JSON.parse(data.image),
+ source = decode_utf8(image_data.image),
+ shape = image_data.shape;
+
+ if(source != ""){
+ image.src = "data:image/jpeg;base64," + source;
+ canvas.width = shape[1];
+ canvas.height = shape[0];
+ }
+}
diff --git a/exercises/static/exercises/dl_digit_classifier_react/python_template/ros2_humble/exercise.py b/exercises/static/exercises/dl_digit_classifier_react/python_template/ros2_humble/exercise.py
index c6db07f43..21af86563 100644
--- a/exercises/static/exercises/dl_digit_classifier_react/python_template/ros2_humble/exercise.py
+++ b/exercises/static/exercises/dl_digit_classifier_react/python_template/ros2_humble/exercise.py
@@ -13,6 +13,8 @@
 import cv2
 import numpy as np
 import onnxruntime
+import onnx
+from onnxruntime.quantization import quantize_dynamic, QuantType
from websocket_server import WebsocketServer
from gui import GUI, ThreadGUI
@@ -60,16 +62,48 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
raw_dl_model_bytes = raw_dl_model.encode('ascii')
raw_dl_model_bytes = base64.b64decode(raw_dl_model_bytes)
- # Load ONNX model
+ # Load and optimize ONNX model
ort_session = None
try:
with open(self.aux_model_fname, "wb") as f:
f.write(raw_dl_model_bytes)
- ort_session = onnxruntime.InferenceSession(self.aux_model_fname)
+
+ # Load the original model
+ model = onnx.load(self.aux_model_fname)
+
+ # Apply optimizations directly using ONNX Runtime
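+            # NOTE: onnx.optimizer assumes onnx < 1.9; later releases moved
+            # these passes to the separate "onnxoptimizer" package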
+ model_optimized = onnx.optimizer.optimize(model, passes=[
+ "eliminate_identity",
+ "eliminate_deadend",
+ "eliminate_nop_dropout",
+ "eliminate_nop_transpose",
+ "fuse_bn_into_conv",
+ "fuse_consecutive_transposes",
+ "fuse_pad_into_conv",
+ "fuse_transpose_into_gemm",
+ "lift_lexical_references",
+ "nop_elimination",
+ "split_init"
+ ])
+
+ # Save the optimized model
+ optimized_model_fname = "optimized_model.onnx"
+ onnx.save(model_optimized, optimized_model_fname)
+
+ # Quantize the model
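+            # (dynamic quantization stores the weights as int8 and dequantizes
+            # them on the fly, shrinking the model and speeding up CPU inference)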
+ quantized_model_fname = "quantized_model.onnx"
+ quantize_dynamic(optimized_model_fname, quantized_model_fname, weight_type=QuantType.QInt8)
+
+ # Load the quantized model
+ ort_session = onnxruntime.InferenceSession(quantized_model_fname)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
print(str(exc_value))
- print("ERROR: Model couldn't be loaded")
+ print("ERROR: Model couldn't be loaded or optimized")
try:
# Init auxiliar variables used for stabilized predictions
@@ -102,10 +136,10 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
pred = int(np.argmax(output, axis=1)) # get the index of the max log-probability
end = time.time()
- frame_time = round(end-start, 3)
- fps = 1.0/frame_time
+ frame_time = round(end - start, 3)
+ fps = 1.0 / frame_time
-                    # number of consecutive frames that must be reached to consider a validprediction
+                    # number of consecutive frames that must be reached to consider a valid prediction
- n_consecutive_frames = int(fps/2)
+ n_consecutive_frames = int(fps / 2)
# For stability, only show digit if detected in more than n consecutive frames
if pred != previous_established_pred:
@@ -122,9 +156,9 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
previous_pred = pred
# Show region used as ROI
- cv2.rectangle(input_image,pt2=(w_border, h_border),pt1=(w_border + w_roi, h_border + h_roi),color=(255, 0, 0),thickness=3)
+ cv2.rectangle(input_image, pt2=(w_border, h_border), pt1=(w_border + w_roi, h_border + h_roi), color=(255, 0, 0), thickness=3)
# Show FPS count
- cv2.putText(input_image, "FPS: {}".format(int(fps)), (7,25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)
+ cv2.putText(input_image, "FPS: {}".format(int(fps)), (7, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
# Send result
self.gui.showResult(input_image, str(previous_established_pred))
@@ -136,7 +170,7 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
self.iteration_counter += 1
- # The code should be run for atleast the target time step
+ # The code should be run for at least the target time step
# If it's less put to sleep
if (ms < self.ideal_cycle):
time.sleep((self.ideal_cycle - ms) / 1000.0)
@@ -149,6 +183,7 @@ def process_dl_model(self, raw_dl_model, roi_scale=0.75, input_size=(28, 28)):
exc_type, exc_value, exc_traceback = sys.exc_info()
print(str(exc_value))
+
# Function to measure the frequency of iterations
def measure_frequency(self):
previous_time = datetime.now()
diff --git a/exercises/static/exercises/human_detection_newmanager/entry_point/exercise.py b/exercises/static/exercises/human_detection_newmanager/entry_point/exercise.py
new file mode 100644
index 000000000..ebcedb2b9
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/entry_point/exercise.py
@@ -0,0 +1,13 @@
+import os.path
+from typing import Callable
+
+from src.manager.libs.applications.compatibility.exercise_wrapper_ros2 import CompatibilityExerciseWrapperRos2
+
+
+class Exercise(CompatibilityExerciseWrapperRos2):
+ def __init__(self, circuit: str, update_callback: Callable):
+ current_path = os.path.dirname(__file__)
+
+ super(Exercise, self).__init__(exercise_command=f"{current_path}/../../python_template/ros2_humble/exercise.py 0.0.0.0",
+ gui_command=f"{current_path}/../../python_template/ros2_humble/gui.py 0.0.0.0 {circuit}",
+ update_callback=update_callback)
diff --git a/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_standard.py b/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_standard.py
new file mode 100644
index 000000000..3e2d0101a
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_standard.py
@@ -0,0 +1,7 @@
+import launch
+from launch import LaunchDescription
+
+def generate_launch_description():
+ return LaunchDescription()
+
+
diff --git a/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_webcam.launch.py b/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_webcam.launch.py
new file mode 100644
index 000000000..49cb452db
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/launch/ros2_humble/human_detection_webcam.launch.py
@@ -0,0 +1,17 @@
+import launch
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+def generate_launch_description():
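+    # v4l2_camera grabs frames from /dev/video0 and publishes them on /image_raw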
+ return LaunchDescription([
+ Node(
+ package='v4l2_camera',
+ executable='v4l2_camera_node',
+ name='v4l2_camera_node',
+ parameters=[
+ {'video_device': '/dev/video0'}
+ ],
+ ),
+ ])
+
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/GUI.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/GUI.py
new file mode 100755
index 000000000..f12d82647
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/GUI.py
@@ -0,0 +1,194 @@
+import json
+import os
+import rclpy
+import cv2
+import sys
+import base64
+import threading
+import time
+import numpy as np
+from datetime import datetime
+import websocket
+import subprocess
+import logging
+
+from hal_interfaces.general.odometry import OdometryNode
+from console import start_console
+
+
+# Graphical User Interface Class
+class GUI:
+ # Initialization function
+ # The actual initialization
+ def __init__(self, host):
+
+ self.payload = {'image': '', 'shape': []}
+
+ # ROS2 init
+ if not rclpy.ok():
+ rclpy.init(args=None)
+
+
+ # Image variables
+ self.image_to_be_shown = None
+ self.image_to_be_shown_updated = False
+ self.image_show_lock = threading.Lock()
+ self.host = host
+ self.client = None
+
+
+
+ self.ack = False
+ self.ack_lock = threading.Lock()
+
+ # Create the lap object
+ # TODO: maybe move this to HAL and have it be hybrid
+
+
+ self.client_thread = threading.Thread(target=self.run_websocket)
+ self.client_thread.start()
+
+ def run_websocket(self):
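+        # run_forever() blocks until the connection drops, so this loop
+        # reconnects automatically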
+ while True:
+ print("GUI WEBSOCKET CONNECTED")
+ self.client = websocket.WebSocketApp(self.host, on_message=self.on_message)
+ self.client.run_forever(ping_timeout=None, ping_interval=0)
+
+ # Function to prepare image payload
+ # Encodes the image as a JSON string and sends through the WS
+ def payloadImage(self):
+ with self.image_show_lock:
+ image_to_be_shown_updated = self.image_to_be_shown_updated
+ image_to_be_shown = self.image_to_be_shown
+
+ image = image_to_be_shown
+ payload = {'image': '', 'shape': ''}
+
+ if not image_to_be_shown_updated:
+ return payload
+
+ shape = image.shape
+ frame = cv2.imencode('.JPEG', image)[1]
+ encoded_image = base64.b64encode(frame)
+
+ payload['image'] = encoded_image.decode('utf-8')
+ payload['shape'] = shape
+ with self.image_show_lock:
+ self.image_to_be_shown_updated = False
+
+ return payload
+
+ # Function for student to call
+ def showImage(self, image):
+ with self.image_show_lock:
+ self.image_to_be_shown = image
+ self.image_to_be_shown_updated = True
+
+ # Update the gui
+ def update_gui(self):
+ # print("GUI update")
+ # Payload Image Message
+ payload = self.payloadImage()
+ self.payload["image"] = json.dumps(payload)
+
+
+ message = json.dumps(self.payload)
+ if self.client:
+ try:
+ self.client.send(message)
+ # print(message)
+ except Exception as e:
+ print(f"Error sending message: {e}")
+
+ def on_message(self, ws, message):
+ """Handles incoming messages from the websocket client."""
+ if message.startswith("#ack"):
+ # print("on message" + str(message))
+ self.set_acknowledge(True)
+
+ def get_acknowledge(self):
+ """Gets the acknowledge status."""
+ with self.ack_lock:
+ ack = self.ack
+
+ return ack
+
+ def set_acknowledge(self, value):
+ """Sets the acknowledge status."""
+ with self.ack_lock:
+ self.ack = value
+
+
+class ThreadGUI:
+ """Class to manage GUI updates and frequency measurements in separate threads."""
+
+ def __init__(self, gui):
+ """Initializes the ThreadGUI with a reference to the GUI instance."""
+ self.gui = gui
+ self.ideal_cycle = 80
+ self.real_time_factor = 0
+ self.frequency_message = {'brain': '', 'gui': ''}
+ self.iteration_counter = 0
+ self.running = True
+
+ def start(self):
+ """Starts the GUI, frequency measurement, and real-time factor threads."""
+ self.frequency_thread = threading.Thread(target=self.measure_and_send_frequency)
+ self.gui_thread = threading.Thread(target=self.run)
+ self.frequency_thread.start()
+ self.gui_thread.start()
+ print("GUI Thread Started!")
+
+ def measure_and_send_frequency(self):
+ """Measures and sends the frequency of GUI updates and brain cycles."""
+ previous_time = datetime.now()
+ while self.running:
+ time.sleep(2)
+
+ current_time = datetime.now()
+ dt = current_time - previous_time
+ ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
+ previous_time = current_time
+ measured_cycle = ms / self.iteration_counter if self.iteration_counter > 0 else 0
+ self.iteration_counter = 0
+ brain_frequency = round(1000 / measured_cycle, 1) if measured_cycle != 0 else 0
+ gui_frequency = round(1000 / self.ideal_cycle, 1)
+ self.frequency_message = {'brain': brain_frequency, 'gui': gui_frequency}
+ message = json.dumps(self.frequency_message)
+ if self.gui.client:
+ try:
+ self.gui.client.send(message)
+ except Exception as e:
+ print(f"Error sending frequency message: {e}")
+
+ def run(self):
+ """Main loop to update the GUI at regular intervals."""
+ while self.running:
+ start_time = datetime.now()
+
+ self.gui.update_gui()
+ self.iteration_counter += 1
+ finish_time = datetime.now()
+
+ dt = finish_time - start_time
+ ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
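+            # Pace the loop at roughly 20 Hz (one GUI update every 50 ms)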
+ sleep_time = max(0, (50 - ms) / 1000.0)
+ time.sleep(sleep_time)
+
+
+# Create a GUI interface
+host = "ws://127.0.0.1:2303"
+gui_interface = GUI(host)
+
+# Spin a thread to keep the interface updated
+thread_gui = ThreadGUI(gui_interface)
+thread_gui.start()
+
+# Redirect the console
+start_console()
+
+def showImage(image):
+ gui_interface.showImage(image)
+
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/HAL.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/HAL.py
new file mode 100755
index 000000000..8516d0f41
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/HAL.py
@@ -0,0 +1,52 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import threading
+import cv2
+
+current_frame = None # Global variable to store the frame
+
+class WebcamSubscriber(Node):
+ def __init__(self):
+ super().__init__('webcam_subscriber')
+ self.subscription = self.create_subscription(
+ Image,
+ '/image_raw',
+ self.listener_callback,
+ 10)
+ self.subscription # prevent unused variable warning
+ self.bridge = CvBridge()
+
+ def listener_callback(self, msg):
+ global current_frame
+ self.get_logger().info('Receiving video frame')
+ current_frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
+
+def run_webcam_node():
+    # Ensure the ROS2 context exists before creating the node
+    if not rclpy.ok():
+        rclpy.init(args=None)
+    webcam_subscriber = WebcamSubscriber()
+
+    rclpy.spin(webcam_subscriber)
+    webcam_subscriber.destroy_node()
+
+
+# Start the ROS2 node in a separate thread
+thread = threading.Thread(target=run_webcam_node)
+thread.start()
+
+def getImage():
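+    # Returns the latest webcam frame, or None until the first frame arrives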
+ global current_frame
+ return current_frame
+
+uploaded_vid_capture = cv2.VideoCapture('/workspace/code/demo_video/video.mp4')
+frame_number = 0
+def getVid():
+ # After the frame has been inferred on, the frame number gets incremented in exercise.py
+ uploaded_vid_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
+ success, img = uploaded_vid_capture.read()
+ return success, img
+
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Video_Inference_Test_Code.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Video_Inference_Test_Code.py
new file mode 100644
index 000000000..b2d3c2b20
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Video_Inference_Test_Code.py
@@ -0,0 +1,74 @@
+import GUI
+import HAL
+import base64
+from datetime import datetime
+import os
+import sys
+import cv2
+import numpy as np
+import onnxruntime as rt
+
+raw_dl_model = '/workspace/code/demo_model/Demo_Model.onnx'
+
+def display_output_detection(img, detections, scores):
+ """Draw box and label for the detections."""
+ height, width = img.shape[:2]
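+    # Each detection is [ymin, xmin, ymax, xmax], normalized to [0, 1]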
+ for i, detection in enumerate(detections):
+ top = int(max(0, np.floor(detection[0] * height + 0.5)))
+ left = int(max(0, np.floor(detection[1] * width + 0.5)))
+ bottom = int(min(height, np.floor(detection[2] * height + 0.5)))
+ right = int(min(width, np.floor(detection[3] * width + 0.5)))
+ cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+ cv2.putText(img, 'Human', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
+
+def display_gt_detection(img, gt_detections):
+ """Draw ground truth bounding boxes."""
+ for gt_detection in gt_detections:
+ left, top, right, bottom = map(int, gt_detection[1:5])
+ cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
+
+# Load ONNX model
+sess = rt.InferenceSession(raw_dl_model)
+input_layer_name = sess.get_inputs()[0].name
+output_layers_names = [output.name for output in sess.get_outputs()]
+
+while True:
+ # Get input webcam image
+ success, img = HAL.getVid()
+ if not success:
+ break
+
+ img_resized = cv2.resize(img, (300, 300))
+ img_data = np.reshape(
+ img_resized, (1, img_resized.shape[0], img_resized.shape[1], img_resized.shape[2]))
+
+ # Inference
+ result = sess.run(output_layers_names, {input_layer_name: img_data})
+ detection_boxes, detection_classes, detection_scores, num_detections = result
+ count = 0
+ detections = []
+ scores = []
+ batch_size = num_detections.shape[0]
+
+ for batch in range(0, batch_size):
+ for detection in range(0, int(num_detections[batch])):
+ c = detection_classes[batch][detection]
+ # Skip if not human class
+ if c != 1:
+ GUI.showImage(img)
+ continue
+
+ count += 1
+ d = detection_boxes[batch][detection]
+ detections.append(d)
+ score = detection_scores[batch][detection]
+ scores.append(score)
+
+ display_output_detection(img, detections, scores)
+ HAL.frame_number += 1
+
+    # Show the frame once it has been fully processed
+ GUI.showImage(img)
+
+
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Webcam_Inference_Test_Code.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Webcam_Inference_Test_Code.py
new file mode 100644
index 000000000..067fc4f81
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/code/Webcam_Inference_Test_Code.py
@@ -0,0 +1,61 @@
+import GUI
+import HAL
+import base64
+from datetime import datetime
+import os
+import sys
+import cv2
+import numpy as np
+import onnxruntime as rt
+raw_dl_model = '/workspace/code/demo_model/Demo_Model.onnx'
+
+def display_output_detection(img, detections, scores):
+ """Draw box and label for the detections."""
+ height, width = img.shape[:2]
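+    # Each detection is [ymin, xmin, ymax, xmax], normalized to [0, 1]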
+ for i, detection in enumerate(detections):
+ top = int(max(0, np.floor(detection[0] * height + 0.5)))
+ left = int(max(0, np.floor(detection[1] * width + 0.5)))
+ bottom = int(min(height, np.floor(detection[2] * height + 0.5)))
+ right = int(min(width, np.floor(detection[3] * width + 0.5)))
+ cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
+ cv2.putText(img, 'Human', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
+
+def display_gt_detection(img, gt_detections):
+ """Draw ground truth bounding boxes."""
+ for gt_detection in gt_detections:
+ left, top, right, bottom = map(int, gt_detection[1:5])
+ cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
+
+# Load ONNX model
+sess = rt.InferenceSession(raw_dl_model)
+input_layer_name = sess.get_inputs()[0].name
+output_layers_names = [output.name for output in sess.get_outputs()]
+
+while True:
+ # Get input webcam image
+ img = HAL.getImage()
+ if img is not None:
+ img_resized = cv2.resize(img, (300, 300))
+ img_data = np.reshape(img_resized, (1, 300, 300, 3))
+
+ # Inference
+ result = sess.run(output_layers_names, {input_layer_name: img_data})
+ detection_boxes, detection_classes, detection_scores, num_detections = result
+
+ detections = []
+ scores = []
+ batch_size = num_detections.shape[0]
+ for batch in range(batch_size):
+ for detection in range(int(num_detections[batch])):
+ c = detection_classes[batch][detection]
+ # Skip if not human class
+ if c != 1:
+ GUI.showImage(img)
+ continue
+ detections.append(detection_boxes[batch][detection])
+ scores.append(detection_scores[batch][detection])
+
+ display_output_detection(img, detections, scores)
+ GUI.showImage(img)
+
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/console.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/console.py
new file mode 100755
index 000000000..5335016ae
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/console.py
@@ -0,0 +1,18 @@
+# Functions to start and close console
+import os
+import sys
+
+def start_console():
+    # List the pseudo-terminals and pick the second-to-last one
+    fds = os.listdir("/dev/pts/")
+    fds.sort()
+    console_fd = fds[-2]
+
+    sys.stderr = open('/dev/pts/' + console_fd, 'w')
+    sys.stdout = open('/dev/pts/' + console_fd, 'w')
+    sys.stdin = open('/dev/pts/' + console_fd, 'r')
+
+def close_console():
+ sys.stderr.close()
+ sys.stdout.close()
+ sys.stdin.close()
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/Demo_Model.onnx b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/Demo_Model.onnx
new file mode 100644
index 000000000..6d3005620
Binary files /dev/null and b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/Demo_Model.onnx differ
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/__init__.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/__init__.py
new file mode 100755
index 000000000..e69de29bb
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/threadPublisher.py b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/threadPublisher.py
new file mode 100755
index 000000000..69aa0ad48
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_model/threadPublisher.py
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 1997-2016 JDE Developers Team
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see http://www.gnu.org/licenses/.
+# Authors :
+# Alberto Martin Florido
+# Aitor Martinez Fernandez
+#
+import threading
+import time
+from datetime import datetime
+
+time_cycle = 80
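+# Target duration of one publish cycle, in milliseconds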
+
+
+class ThreadPublisher(threading.Thread):
+
+ def __init__(self, pub, kill_event):
+ self.pub = pub
+ self.kill_event = kill_event
+        threading.Thread.__init__(self, args=(kill_event,))
+
+ def run(self):
+ while (not self.kill_event.is_set()):
+ start_time = datetime.now()
+
+ self.pub.publish()
+
+ finish_Time = datetime.now()
+
+ dt = finish_Time - start_time
+ ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
+ #print (ms)
+ if (ms < time_cycle):
+ time.sleep((time_cycle - ms) / 1000.0)
\ No newline at end of file
diff --git a/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_video/video.mp4 b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_video/video.mp4
new file mode 100644
index 000000000..591096aee
Binary files /dev/null and b/exercises/static/exercises/human_detection_newmanager/python_template/ros2_humble/demo_video/video.mp4 differ
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/DigClassMain.js b/exercises/static/exercises/human_detection_newmanager/react-components/DigClassMain.js
new file mode 100644
index 000000000..dcc0c28f0
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/DigClassMain.js
@@ -0,0 +1,14 @@
+import * as React from "react";
+import {Fragment} from "react";
+
+import "./css/DigClassMain.css";
+
+const DigClassMain = (props) => {
+  return (
+    <Fragment>
+      {props.children}
+    </Fragment>
+  );
+};
+
+export default DigClassMain;
\ No newline at end of file
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/MapSelectorFollow.js b/exercises/static/exercises/human_detection_newmanager/react-components/MapSelectorFollow.js
new file mode 100644
index 000000000..ec7de8a0e
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/MapSelectorFollow.js
@@ -0,0 +1,167 @@
+import React, { useEffect, useState } from "react";
+import MenuItem from "@mui/material/MenuItem";
+import { FormControl, InputLabel, Select, Box } from "@mui/material";
+
+const exerciseConfig = JSON.parse(
+ document.getElementById("exercise-config").textContent
+);
+const exerciseId = exerciseConfig.exercise_id;
+var ros_version = 1;
+
+export default function MapSelectorFollow(props) {
+ const changeConfig = (circuitPath) => {
+ const config = JSON.parse(
+ document.getElementById("exercise-config").textContent
+ );
+ config.application.params = { circuit: circuitPath };
+ config.launch_file = `$EXERCISE_FOLDER/launch/simple_line_follower_ros_headless_${circuitPath}.launch`;
+ return config;
+ };
+
+ const handleCircuitChange = (e) => {
+ context.mapSelected = e.name;
+ setSelectedCircuit(e);
+ let full_config = JSON.parse(
+ document.getElementById("exercise-config").textContent
+ );
+ let config = full_config[`ROS${ros_version}`][0];
+ config.application.params = { circuit: e.name };
+ config.launch_file = e.path;
+ config['exercise_id'] = exerciseId;
+ config["world"] = "gazebo";
+ config["visualization"] = "gazebo_rae";
+ config["world"] = "gazebo";
+ if (ros_version == 1) {
+ config["resource_folders"] = "$EXERCISE_FOLDER/launch/ros1_noetic";
+ }
+ if (ros_version == 2) {
+ config["resource_folders"] = "$EXERCISE_FOLDER/launch/ros2_humble";
+ }
+ config["model_folders"] = "$CUSTOM_ROBOTS_FOLDR/f1/models";
+ config["launch_file"] = e.path;
+ config["visualization"] = "gazebo_rae";
+ config.height = window.innerHeight / 2;
+ config.width = window.innerWidth / 2;
+ window.RoboticsExerciseComponents.commsManager.terminate().then(() => {
+ window.RoboticsReactComponents.MessageSystem.Loading.showLoading(
+ "Launching World in Robotics Backend"
+ );
+ window.RoboticsExerciseComponents.commsManager.launch(config).then(()=> {
+ RoboticsReactComponents.MessageSystem.Loading.hideLoading();
+ }).catch((e) => {
+ RoboticsReactComponents.MessageSystem.Loading.showFailLoading(
+ `Error launching the world:${e.data.message}. Try changing the world or reloading the page`
+ );
+ });
+ });
+ };
+
+ const [disabled, setDisabled] = useState(true);
+ const [circuitOptions, setCircuitOptions] = useState([]);
+ const [selectedCircuit, setSelectedCircuit] = useState("");
+
+
+ useEffect(() => {
+ const callback = (message) => {
+ if (message.data.state !== "connected") {
+ setDisabled(false);
+ } else {
+ setDisabled(true);
+ }
+ };
+ window.RoboticsExerciseComponents.commsManager.subscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.STATE_CHANGED],
+ callback
+ );
+
+ return () => {
+ window.RoboticsExerciseComponents.commsManager.unsubscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.STATE_CHANGED],
+ callback
+ );
+ };
+ }, []);
+
+ useEffect(() => {
+ const serverBase = `${document.location.protocol}//${document.location.hostname}:7164`;
+ let requestUrl = `${serverBase}/exercises/exercise/${exerciseId}/launch_files`;
+ const request = new Request(requestUrl, {
+ method: "POST",
+ headers: {
+ "Content-type": "application/json",
+ 'X-CSRFToken': context.csrf
+ },
+ })
+ fetch(request)
+ .then((response) => response.json())
+ .then((data) => {
+ const rosVersionURL = `${serverBase}/exercises/ros_version/`;
+ ros_version = 1;
+ fetch(rosVersionURL)
+ .then((res) => res.json())
+ .then((msg) => {
+ ros_version = msg.version;
+
+ if (isNaN(parseInt(ros_version))) {
+ ros_version = 1;
+ }
+ const config = data;
+ // Selects the configs available for the ROS version installed
+ const availableConfigs = {};
+ availableConfigs[`ROS${ros_version}`] = config[`ROS${ros_version}`];
+ console.log(availableConfigs);
+ setSelectedCircuit(availableConfigs[`ROS${ros_version}`][0]);
+ setCircuitOptions(availableConfigs[`ROS${ros_version}`])
+ context.mapSelected =
+ availableConfigs[`ROS${ros_version}`][0].name;
+ })
+ setCircuitOptions(data.launch);
+
+ })
+ .catch((error) => {
+ console.log("Error fetching circuit options:", error);
+ });
+ }, []);
+
+
+
+  return (
+    <Box>
+      <FormControl fullWidth>
+        <InputLabel id="world-selector-label">World</InputLabel>
+        <Select
+          labelId="world-selector-label"
+          label="World"
+          disabled={disabled}
+          value={selectedCircuit}
+          onChange={(e) => handleCircuitChange(e.target.value)}
+        >
+          {circuitOptions.map((option) => (
+            <MenuItem key={option.name} value={option}>
+              {option.name}
+            </MenuItem>
+          ))}
+        </Select>
+      </FormControl>
+    </Box>
+  );
+}
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/SpecificFollowLine.js b/exercises/static/exercises/human_detection_newmanager/react-components/SpecificFollowLine.js
new file mode 100644
index 000000000..84c610fee
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/SpecificFollowLine.js
@@ -0,0 +1,53 @@
+import * as React from "react";
+import { Box } from "@mui/material";
+import "./css/GUICanvas.css";
+import { drawImage } from "./helpers/showImagesFollowLine";
+
+
+const SpecificFollowLine = (props) => {
+ const [image, setImage] = React.useState(null)
+ const canvasRef = React.useRef(null)
+
+ React.useEffect(() => {
+ console.log("TestShowScreen subscribing to ['update'] events");
+ const callback = (message) => {
+ if(message.data.update.image){
+ console.log('image')
+ const image = JSON.parse(message.data.update.image)
+ if(image.image){
+ drawImage(message.data.update)
+ }
+ }
+ };
+
+ window.RoboticsExerciseComponents.commsManager.subscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.UPDATE],
+ callback
+ );
+
+ return () => {
+ console.log("TestShowScreen unsubscribing from ['state-changed'] events");
+ window.RoboticsExerciseComponents.commsManager.unsubscribe(
+ [window.RoboticsExerciseComponents.commsManager.events.UPDATE],
+ callback
+ );
+ };
+ }, []);
+
+  return (
+    <Box sx={{ width: props.width, height: props.height }}>
+      <canvas id="canvas" className="exercise-canvas" ref={canvasRef} />
+    </Box>
+  );
+};
+
+SpecificFollowLine.defaultProps = {
+ width: 800,
+ height: 600,
+};
+
+export default SpecificFollowLine
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/css/DigClassMain.css b/exercises/static/exercises/human_detection_newmanager/react-components/css/DigClassMain.css
new file mode 100644
index 000000000..3bdee7925
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/css/DigClassMain.css
@@ -0,0 +1,28 @@
+* {
+ box-sizing: border-box;
+}
+
+body, html {
+ width: 100%; height: 100%;
+}
+
+#exercise-container {
+ position: absolute;
+ top: 0; left: 0; bottom: 0; right: 0;
+ overflow: hidden;
+ display: flex;
+ flex-direction: column;
+}
+
+#exercise-container #content {
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+}
+
+#exercise-container #content #content-exercise {
+ display: flex;
+ flex-direction: column;
+ width: 100%;
+ height: 100%;
+}
\ No newline at end of file
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/css/GUICanvas.css b/exercises/static/exercises/human_detection_newmanager/react-components/css/GUICanvas.css
new file mode 100644
index 000000000..10997de2b
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/css/GUICanvas.css
@@ -0,0 +1,6 @@
+.exercise-canvas {
+ width: 100%;
+ height: 100%;
+ max-height: 100%;
+ background-color: #303030;
+}
diff --git a/exercises/static/exercises/human_detection_newmanager/react-components/helpers/showImagesFollowLine.js b/exercises/static/exercises/human_detection_newmanager/react-components/helpers/showImagesFollowLine.js
new file mode 100644
index 000000000..26737e626
--- /dev/null
+++ b/exercises/static/exercises/human_detection_newmanager/react-components/helpers/showImagesFollowLine.js
@@ -0,0 +1,34 @@
+// To decode the image string we will receive from server
+function decode_utf8(s){
+ return decodeURIComponent(escape(s))
+}
+
+let image = new Image();
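+// A single Image instance is reused so each new frame replaces the previous one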
+
+export function drawImage (data){
+ var canvas = document.getElementById("canvas"),
+ context = canvas.getContext('2d')
+
+ // For image object
+ image.onload = function(){
+ update_image();
+ }
+
+ // Request Animation Frame to remove the flickers
+ function update_image(){
+ window.requestAnimationFrame(update_image);
+ context.drawImage(image, 0, 0);
+ }
+
+ // Parse the Image Data
+ var image_data = JSON.parse(data.image),
+ source = decode_utf8(image_data.image),
+ shape = image_data.shape;
+
+ if(source != ""){
+ image.src = "data:image/jpeg;base64," + source;
+ canvas.width = shape[1];
+ canvas.height = shape[0];
+ }
+}
diff --git a/exercises/templates/exercises/dl_digit_classifier_newmanager/exercise.html b/exercises/templates/exercises/dl_digit_classifier_newmanager/exercise.html
new file mode 100644
index 000000000..225a913a7
--- /dev/null
+++ b/exercises/templates/exercises/dl_digit_classifier_newmanager/exercise.html
@@ -0,0 +1,46 @@
+{% extends "react_frontend/exercise_base.html" %}
+{% load react_component %}
+
+{% block exercise_header %}
+{% endblock %}
+
+{% block react-content %}
+    {% react_component exercise/dl_digit_classifier_newmanager/DigClassMain %}
+
+ {% react_component components/wrappers/MaterialBox id="exercise-container" %}
+ {% if deployment %}
+ {% react_component components/layout_components/MainAppBar exerciseName="Digit Classifier" url="https://jderobot.github.io/RoboticsAcademy/exercises/ComputerVision/dl_digit_classifier"%}
+ {% react_component components/visualizers/WorldSelector %} {% end_react_component %}
+ {% end_react_component %}
+ {% else %}
+ {% react_component components/layout_components/MainAppBar exerciseName="Digit Classifier" url="https://jderobot.github.io/RoboticsAcademy/exercises/ComputerVision/dl_digit_classifier"%}
+ {% react_component components/visualizers/WorldSelector %} {% end_react_component %}
+ {% end_react_component %}
+ {% endif %}
+ {% react_component components/wrappers/MaterialBox id="content" %}
+ {% react_component components/wrappers/MaterialBox id="content-exercise" %}
+ {% if deployment %}
+ {% react_component components/layout_components/ExerciseControlExtra %}{% end_react_component %}
+ {% else %}
+ {% react_component components/layout_components/ExerciseControl %}{% end_react_component %}
+ {% endif %}
+ {% react_component components/wrappers/FlexContainer row %}
+ {% react_component components/wrappers/FlexContainer console %}
+ {% if deployment %}
+ {% react_component components/editors/AceEditorRobotExtra %}{% end_react_component %}
+ {% else %}
+ {% react_component components/editors/AceEditorRobot %}{% end_react_component %}
+ {% endif %}
+ {% react_component components/visualizers/ConsoleViewer%}{% end_react_component %}
+ {% end_react_component%}
+ {% react_component components/wrappers/FlexContainer%}
+                    {% react_component exercise/dl_digit_classifier_newmanager/SpecificFollowLine %}{% end_react_component %}
+ {% end_react_component %}
+ {% end_react_component %}
+ {% end_react_component %}
+ {% react_component components/message_system/Loading %}{% end_react_component %}
+ {% react_component components/message_system/Alert %}{% end_react_component %}
+{% end_react_component %}
+{% end_react_component %}
+ {% end_react_component %}
+{% endblock %}
\ No newline at end of file
diff --git a/exercises/templates/exercises/human_detection_newmanager/exercise.html b/exercises/templates/exercises/human_detection_newmanager/exercise.html
new file mode 100644
index 000000000..153f4bf89
--- /dev/null
+++ b/exercises/templates/exercises/human_detection_newmanager/exercise.html
@@ -0,0 +1,32 @@
+{% extends "react_frontend/exercise_base.html" %}
+{% load react_component %}
+
+{% block exercise_header %}
+{% endblock %}
+
+{% block react-content %}
+    {% react_component exercise/human_detection_newmanager/DigClassMain %}
+
+ {% react_component components/wrappers/MaterialBox id="exercise-container" %}
+ {% react_component components/layout_components/MainAppBar exerciseName="Human Detection" url="https://jderobot.github.io/RoboticsAcademy/exercises/ComputerVision/human_detection"%}
+ {% react_component components/visualizers/WorldSelector %} {% end_react_component %}
+ {% end_react_component %}
+
+ {% react_component components/wrappers/MaterialBox id="content" %}
+ {% react_component components/wrappers/MaterialBox id="content-exercise" %}
+ {% react_component components/layout_components/ExerciseControl %}{% end_react_component %}
+ {% react_component components/wrappers/FlexContainer row %}
+ {% react_component components/wrappers/FlexContainer console %}
+ {% react_component components/editors/AceEditorRobot %}{% end_react_component %}
+ {% react_component components/visualizers/ConsoleViewer%}{% end_react_component %}
+ {% end_react_component%}
+ {% react_component exercise/human_detection_newmanager/SpecificFollowLine %}{% end_react_component %}
+
+ {% end_react_component %}
+ {% end_react_component %}
+ {% react_component components/message_system/Loading %}{% end_react_component %}
+ {% react_component components/message_system/Alert %}{% end_react_component %}
+{% end_react_component %}
+{% end_react_component %}
+ {% end_react_component %}
+{% endblock %}
\ No newline at end of file
diff --git a/scripts/develop_academy.sh b/scripts/develop_academy.sh
index 44f575a2e..3ef74139e 100644
--- a/scripts/develop_academy.sh
+++ b/scripts/develop_academy.sh
@@ -5,22 +5,24 @@ ram_version="https://github.com/JdeRobot/RoboticsApplicationManager.git"
branch="humble-devel"
radi_version="humble"
gpu_mode="false"
+nvidia="false"
compose_file="dev_humble_cpu"
# Loop through the arguments using a while loop
-while getopts ":r:b:i:g" opt; do
+while getopts ":r:b:i:g:n" opt; do
case $opt in
r) ram_version="$OPTARG" ;;
b) branch="$OPTARG" ;;
i) radi_version="$OPTARG" ;;
g) gpu_mode="true" ;;
+ n) nvidia="true" ;;
\?) echo "Invalid option: -$OPTARG" >&2 ;; # If an invalid option is provided, print an error message
esac
done
echo "RAM src: $ram_version"
echo "RAM branch: $branch"
-echo "RADI version: $radi_version"
+echo "RoboticsBackend version: $radi_version"
# Install docker-compose if not installed
if ! command -v docker-compose &> /dev/null; then
@@ -59,9 +61,18 @@ cd ..
if [ "$gpu_mode" = "true" ]; then
compose_file="dev_humble_gpu"
fi
+if [ "$nvidia" = "true" ]; then
+ compose_file="dev_humble_nvidia"
+fi
cp compose_cfg/$compose_file.yaml docker-compose.yaml
# Proceed with docker-compose commands
-docker compose up;
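+# --compatibility converts deploy.* resource keys to their non-swarm equivalents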
+if [ "$nvidia" = "true" ]; then
+ docker compose --compatibility up
+else
+ docker compose up
+fi
docker compose down;
rm docker-compose.yaml
diff --git a/scripts/run_academy.sh b/scripts/run_academy.sh
old mode 100755
new mode 100644
index 8b4f15aa8..7fb33c4ef
--- a/scripts/run_academy.sh
+++ b/scripts/run_academy.sh
@@ -2,14 +2,16 @@
# Default: cpu and offline
gpu_mode="false"
+nvidia="false"
base_path_offline="compose_cfg/"
compose_file="user_humble_cpu"
base_path_online="https://raw.githubusercontent.com/JdeRobot/RoboticsAcademy/humble-devel/compose_cfg/"
# Loop through the arguments using a while loop
-while getopts ":g " opt; do
+while getopts ":g:n " opt; do
case $opt in
g) gpu_mode="true" ;;
+ n) nvidia="true" ;;
\?) echo "Invalid option: -$OPTARG" >&2 ;; # If an invalid option is provided, print an error message
esac
done
@@ -18,6 +20,9 @@ done
if [ "$gpu_mode" = "true" ]; then
compose_file="user_humble_gpu"
fi
+if [ "$nvidia" = "true" ]; then
+ compose_file="dev_humble_nvidia"
+fi
# Check the mode
if [ -d compose_cfg ]; then