Merge pull request #109 from siliataider/eliott-dev
add tests
Eliott-rjd authored Jan 18, 2024
2 parents 5aceb6e + fec2886 commit ea60c1d
Showing 3 changed files with 107 additions and 16 deletions.
12 changes: 6 additions & 6 deletions backAgent/main.py
@@ -74,12 +74,12 @@ async def read_socket(websocket):


# Run python backend as client
async def connect_to_websocket():
    uri = "wss://citymanagerjava.onrender.com/websocket-endpoint"
    async with websockets.connect(uri) as websocket:
        while True:
            # Send and receive messages here
            await read_socket(websocket)
# async def connect_to_websocket():
#     uri = "wss://citymanagerjava.onrender.com/websocket-endpoint"
#     async with websockets.connect(uri) as websocket:
#         while True:
#             # Send and receive messages here
#             await read_socket(websocket)
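# (Client mode is disabled here; the backend now runs as a WebSocket server
# via listen_to_websocket below.)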

# Run python backend as server
async def listen_to_websocket():
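    # (Body truncated in this diff view. As a rough sketch only, assuming
    # read_socket is served as the connection handler; the host and port are
    # placeholders, not the repository's actual values:)
    #
    # async with websockets.serve(read_socket, "0.0.0.0", 8765):
    #     await asyncio.Future()  # run forever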
91 changes: 91 additions & 0 deletions backAgent/tests/test_agent.py
@@ -0,0 +1,91 @@
import pytest
import numpy as np
from classes.Agent.AgentDQLearning import AgentDQLearning
from classes.SimulationConditions import SimulationConditions
from classes.Agent.AgentQLearning import AgentQLearning
from classes.AgentEnvironment import AgentEnvironment
from resources.variables import DISCOUNT_FACTOR, START_EXPLORATION_RATE

@pytest.fixture
def agent_q_learning():
    env = AgentEnvironment(timestamp=8, weather=0)

    # Create an AgentQLearning instance with dummy values for the required parameters.
    return AgentQLearning(num_states=108, num_actions=3, env=env, agent_id=1)

@pytest.fixture
def agent_dq_learning():
    env = AgentEnvironment(timestamp=8, weather=0)

    # Create an AgentDQLearning instance with dummy values for the required parameters.
    return AgentDQLearning(num_states=108, num_actions=3, env=env, agent_id=1)


@pytest.fixture
def simulationConditions():
    return SimulationConditions(exploration_rate=1)


def test_choose_action_exploitation_Q_learning(agent_q_learning):
    # Test action choice under exploitation (exploration_rate = 0)
    state_value = (8, 0, 0.5, 0.5, 0.5)
    exploration_rate = 0
    action = agent_q_learning.choose_action(state_value, exploration_rate)

    # The Q-table is empty, so the first action is chosen
    assert action == 0
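    # (Assuming choose_action exploits with np.argmax over the state's Q-row:
    # argmax on an all-zero row returns the first index, hence action 0.)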


def test_train_model_Q_learning(agent_q_learning):
    # Test the Q-table update after a single training step
    state_value = (8, 0, 0.5, 0.5, 0.5)
    next_state_value = (9, 0, 0.6, 0.5, 0.4)
    action = 0
    reward = 15
    learning_rate = 0.1
    # 108 x 3 table of zeros, except the single updated (state, action) entry
    expected_array = np.zeros((108, 3))
    expected_array[94, 0] = 1.5

    agent_q_learning.train_model(state_value, action, reward, next_state_value, learning_rate, DISCOUNT_FACTOR)
    assert np.array_equal(agent_q_learning.q_table, expected_array)
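    # Where the 1.5 comes from, assuming train_model implements the standard
    # tabular update Q(s, a) += lr * (r + gamma * max Q(s', :) - Q(s, a)):
    # starting from an all-zero table, 0 + 0.1 * (15 + DISCOUNT_FACTOR * 0 - 0) = 1.5.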

def test_train_agent(simulationConditions, agent_q_learning, monkeypatch):
    agent_q_learning.env.state_value = (8, 0, 0.5, 0.5, 0.5)
    simulationConditions.timestamp = 9
    simulationConditions.weather = 0

    def mock_action(arr):
        return 0
    monkeypatch.setattr(np.random, 'choice', mock_action)
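    # Patching np.random.choice pins any stochastic action selection to
    # action 0, so the next-state assertion below stays deterministic.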

    agent_update, action = agent_q_learning.train(simulationConditions)

    assert agent_q_learning.env.state_value == (9, 0, 0.475, 0.55, 0.5)


20 changes: 10 additions & 10 deletions backAgent/tests/test_server_setup.py
@@ -6,13 +6,13 @@ def test_one_plus_one_equals_two():
    assert 1 + 1 == 2


@pytest.mark.asyncio
async def test_server_connection():
    try:
        # Attempt to connect to the WebSocket server
        async with websockets.connect("wss://citymanagerpython.onrender.com") as websocket:
            # If the connection is successful, pass the test
            pass
    except Exception as e:
        # If the connection fails, fail the test
        pytest.fail(f"WebSocket connection failed: {e}")
# @pytest.mark.asyncio
# async def test_server_connection():
#     try:
#         # Attempt to connect to the WebSocket server
#         async with websockets.connect("wss://citymanagerpython.onrender.com") as websocket:
#             # If the connection is successful, pass the test
#             pass
#     except Exception as e:
#         # If the connection fails, fail the test
#         pytest.fail(f"WebSocket connection failed: {e}")
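If the goal is to keep the connectivity check without a sleeping Render instance failing CI, a pytest skip marker would be a lighter-weight alternative to commenting the test out. A sketch (the reason string is an assumption):

@pytest.mark.skip(reason="remote WebSocket server may be offline in CI")
@pytest.mark.asyncio
async def test_server_connection():
    async with websockets.connect("wss://citymanagerpython.onrender.com"):
        pass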
