diff --git a/docs/Performance.ipynb b/docs/Performance.ipynb
index 68e0dd0c..2f99e873 100644
--- a/docs/Performance.ipynb
+++ b/docs/Performance.ipynb
@@ -140,14 +140,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 1,
    "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
+     ]
+    },
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "jax JIT compilation: 0.15200079999999616\n",
-      "jax execution average: 0.019547907575757667\n"
+      "jax JIT compilation: 0.1478613000000002\n",
+      "jax execution average: 0.011604835279285908\n"
      ]
     }
    ],
@@ -207,17 +214,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": 3,
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      ":11: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
+      ":12: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
       " dist = torch.sqrt(torch.maximum(torch.sum(deltas ** 2, -1), torch.tensor(1e-4))) # eps=1e-4 to avoid NaN during backprop of sqrt\n",
-      ":19: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
+      ":20: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
       " x_inc_contrib = torch.sum(torch.where(has_impact.unsqueeze(-1), torch.minimum(impact_time.unsqueeze(-1) - dt, torch.tensor(0.0)) * impulse, torch.tensor(0.0)), -2)\n",
-      ":21: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
+      ":22: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.\n",
       " v += torch.sum(torch.where(has_impact.unsqueeze(-1), impulse, torch.tensor(0.0)), -2)\n"
      ]
     },
@@ -225,20 +232,21 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "torch JIT compilation: 0.05920939892530441\n",
-      "torch execution average: 0.045233648270368576\n"
+      "torch JIT compilation: 0.06732069700956345\n",
+      "torch execution average: 0.0452803298830986\n"
      ]
     },
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      ":44: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
-      " print(f\"torch execution average: {torch.mean(torch.tensor(dt[2:]))}\")\n"
+      ":45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
+      " print(f\"torch execution average: {torch.mean(torch.tensor(dt_torch[2:]))}\")\n"
      ]
     }
    ],
    "source": [
+    "import time\n",
     "import torch\n",
     "\n",
     "\n",
@@ -282,7 +290,7 @@
     "v_trj = torch.stack(v_trj)\n",
     "dt_torch = torch.tensor(dt_torch)\n",
     "print(f\"torch JIT compilation: {dt_torch[0]}\")\n",
-    "print(f\"torch execution average: {torch.mean(torch.tensor(dt[2:]))}\")"
+    "print(f\"torch execution average: {torch.mean(torch.tensor(dt_torch[2:]))}\")"
    ],
    "metadata": {
     "collapsed": false,
@@ -305,6 +313,7 @@
     }
    ],
    "source": [
+    "import time\n",
     "import tensorflow as tf\n",
     "\n",
     "@tf.function\n",
@@ -376,7 +385,7 @@
     "| Jax | 19.5 | 16.1 |\n",
     "| TensorFlow | 23.6 | 24.3 |\n",
     "\n",
-    "Here, Φ-ML beats the performance of our native PyTorch and Jax implementation and is on-par with TensorFlow.\n",
+    "Here, Φ-ML beats the performance of our native PyTorch implementation and is on-par with TensorFlow and Jax.\n",
     "As discussed above, all extra code of Φ-ML is completely optimized out during JIT compilation, resulting in similar compiled code.\n",
     "\n",
     "The fact that Φ-ML is faster than PyTorch may be down to some inefficiency in the native PyTorch implementation above.\n",
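For context, the benchmark pattern this patch touches in all three framework cells reports the first call to a jitted function separately, since that call includes tracing and compilation, and then averages the remaining calls while skipping the first couple of warm-up runs. Below is a minimal sketch of that pattern; `step` is a placeholder workload, not the notebook's actual simulation.

```python
import time

import jax
import jax.numpy as jnp


@jax.jit
def step(x):
    # Placeholder workload standing in for the notebook's simulation step.
    return jnp.sqrt(jnp.maximum(jnp.sum(x ** 2, -1), 1e-4))


x = jnp.ones((1000, 2))
dt = []
for _ in range(100):
    t0 = time.perf_counter()
    step(x).block_until_ready()  # wait for JAX's async dispatch before stopping the clock
    dt.append(time.perf_counter() - t0)

print(f"jax JIT compilation: {dt[0]}")                        # first call traces + compiles
print(f"jax execution average: {sum(dt[2:]) / len(dt[2:])}")  # steady state, warm-up runs skipped
```

The `block_until_ready()` call matters here because JAX dispatches computations asynchronously; without it, the timer would stop before the work actually finishes.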