From e7e5a7473a30ff544ba47fe31d4991da795f006f Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 29 May 2024 16:11:28 +0200 Subject: [PATCH 01/32] doc and renamed gaussian splatting --- meshrooPlugin.json | 10 + mrrs/gaussian_splatting/Dockerfile | 24 ++ mrrs/gaussian_splatting/GaussianSplatting.py | 226 ++++++++++++++++++ .../GaussianSplattingRender.py | 71 ++++++ mrrs/gaussian_splatting/__init__.py | 0 5 files changed, 331 insertions(+) create mode 100644 meshrooPlugin.json create mode 100644 mrrs/gaussian_splatting/Dockerfile create mode 100644 mrrs/gaussian_splatting/GaussianSplatting.py create mode 100644 mrrs/gaussian_splatting/GaussianSplattingRender.py create mode 100644 mrrs/gaussian_splatting/__init__.py diff --git a/meshrooPlugin.json b/meshrooPlugin.json new file mode 100644 index 0000000..2260529 --- /dev/null +++ b/meshrooPlugin.json @@ -0,0 +1,10 @@ +[ + { + "pluginName":"Gaussian Splatting", + "nodesFolder":"gaussian_splat" + }, + { + "pluginName":"Dummy", + "nodesFolder":"dummy" + } +] \ No newline at end of file diff --git a/mrrs/gaussian_splatting/Dockerfile b/mrrs/gaussian_splatting/Dockerfile new file mode 100644 index 0000000..6dbd77f --- /dev/null +++ b/mrrs/gaussian_splatting/Dockerfile @@ -0,0 +1,24 @@ +### Need to have docker with nvida-containeer-toolkit installled +### Build with 'docker build . -t gs' +### Run container with 'docker run --rm --runtime=nvidia --gpus all -it gs' + +## Base image and git +FROM pytorch/pytorch:1.13.1-cuda11.6-cudnn8-devel as build1 +RUN apt update && apt -y upgrade +RUN apt install -y git + +# Cloning GS repo +RUN git clone https://github.com/graphdeco-inria/gaussian-splatting --recursive + +#installing GS deps +RUN pip install plyfile tqdm ninja + +#needed to build the rasterisation +#https://www.data-mining.co.nz/docker-for-data-scientists/troubleshooting/ +ARG TORCH_CUDA_ARCH_LIST="Pascal;Volta;Turing;Ampere" +ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}" + +# Build rasteriser and knn +RUN pip install gaussian-splatting/submodules/diff-gaussian-rasterization +RUN pip install gaussian-splatting/submodules/simple-knn + diff --git a/mrrs/gaussian_splatting/GaussianSplatting.py b/mrrs/gaussian_splatting/GaussianSplatting.py new file mode 100644 index 0000000..28adcd4 --- /dev/null +++ b/mrrs/gaussian_splatting/GaussianSplatting.py @@ -0,0 +1,226 @@ + +import os +from meshroom.core import desc +from meshroom.core.plugin import DockerNode +from distutils.dir_util import copy_tree + + +from collections import namedtuple +from trimesh.exchange.ply import _parse_header, _ply_binary +import numpy as np + +def sigmoid(x): + return 1 / (1 + np.exp(-x)) + +def load_gs_ply(path, max_sh_degree=3): + """ + (modified from original repo) + """ + with open(path, 'rb') as f: + elements, _, _ = _parse_header(f) + _ply_binary(elements, f) + + xyz = np.stack((np.asarray(elements['vertex']['data']["x"]), + np.asarray(elements['vertex']['data']["y"]), + np.asarray(elements['vertex']['data']["z"])), axis=1) + opacities = np.asarray(elements['vertex']['data']["opacity"])[..., np.newaxis] + #aaply activation + opacities = sigmoid(opacities) + + features_dc = np.zeros((xyz.shape[0], 3, 1)) + features_dc[:, 0, 0] = np.asarray(elements['vertex']['data']["f_dc_0"]) + features_dc[:, 1, 0] = np.asarray(elements['vertex']['data']["f_dc_1"]) + features_dc[:, 2, 0] = np.asarray(elements['vertex']['data']["f_dc_2"]) + + extra_f_names = [p for p in elements['vertex']['properties'] if p.startswith("f_rest_")] + extra_f_names = sorted(extra_f_names, key = 
lambda x: int(x.split('_')[-1])) + assert len(extra_f_names)==3*(max_sh_degree + 1) ** 2 - 3 + features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) + for idx, attr_name in enumerate(extra_f_names): + features_extra[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) + features_extra = features_extra.reshape((features_extra.shape[0], 3, (max_sh_degree + 1) ** 2 - 1)) + + scale_names = [p for p in elements['vertex']['properties']if p.startswith("scale_")] + scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) + scales = np.zeros((xyz.shape[0], len(scale_names))) + for idx, attr_name in enumerate(scale_names): + scales[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + #scaling activation + scales=np.exp(scales) + + + + rot_names = [p for p in elements['vertex']['properties']if p.startswith("rot")] + rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) + rots = np.zeros((xyz.shape[0], len(rot_names))) + for idx, attr_name in enumerate(rot_names): + rots[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + + + return xyz, rots, scales, \ + opacities, features_dc, features_extra + + +def rgb_from_sh(sh_0): + """ + Get RGB values for sh coef 0 (solid color) + """ + C0 = 0.28209479177387814 + result = C0 * sh_0 + 0.5 + result = np.clip(result, 0, 1) + return result + +class GaussianSplatting(DockerNode): + + category = 'GaussianSplatting' + documentation = '''Node to optimise gaussian splats from a set of input views and poses.''' + gpu = desc.Level.INTENSIVE + + commandLine = "python gaussian-splatting/train.py -s /node_folder/input -m /node_folder/output {allParams} \ + --save_iterations {iterationsValue}"# --test_iterations {iterationsValue} " + + envFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Dockerfile')) + + inputs = [ + desc.File( + name="inputColmapFolder", + label="inputColmapFolder", + description="inputColmapFolder", + value="", + uid=[0], + group="" + ), + desc.StringParam( + name="resolution", + label="resolution", + description="Specifies resolution of the loaded images before training. If provided 1, 2, 4 or 8, uses original, 1/2, 1/4 or 1/8 resolution, respectively. For all other values, rescales the width to the given number while maintaining image aspect. If not set and input image width exceeds 1.6K pixels, inputs are automatically rescaled to this target.", + value="1", + uid=[0], + ), + desc.IntParam( + name="sh_degree", + label="sh_degree", + description="Order of spherical harmonics to be used (no larger than 3). 3 by default.", + value=3, + range=(0, 3, 1), + uid=[0], + ), + desc.IntParam( + name="iterations", + label="iterations", + description="Number of total iterations to train for, 30000 by default.", + value=30000, + range=(0, 100000, 1), + uid=[0], + ), + + # desc.ChoiceParam( + # name="data_device", + # label="data_device", + # description="Specifies where to put the source image data, cuda by default, recommended to use cpu if training on large/high-resolution dataset, will reduce VRAM consumption, but slightly slow down training. Thanks to HrsPythonix.", + # value="cuda", + # values=["cuda","cpu"], + # uid=[0], + # advanced=True + # ), + + # --feature_lr + # Spherical harmonics features learning rate, 0.0025 by default. + # --opacity_lr + # Opacity learning rate, 0.05 by default. + # --scaling_lr + # Scaling learning rate, 0.005 by default. + # --rotation_lr + # Rotation learning rate, 0.001 by default. 
+ # --position_lr_max_steps + # Number of steps (from 0) where position learning rate goes from initial to final. 30_000 by default. + # --position_lr_init + # Initial 3D position learning rate, 0.00016 by default. + # --position_lr_final + # Final 3D position learning rate, 0.0000016 by default. + # --position_lr_delay_mult + # Position learning rate multiplier (cf. Plenoxels), 0.01 by default. + + # --densify_from_iter + # Iteration where densification starts, 500 by default. + # --densify_until_iter + # Iteration where densification stops, 15_000 by default. + # --densify_grad_threshold + # Limit that decides if points should be densified based on 2D position gradient, 0.0002 by default. + # --densification_interval + # How frequently to densify, 100 (every 100 iterations) by default. + # --percent_dense + # Percentage of scene extent (0--1) a point must exceed to be forcibly densified, 0.01 by default. + + # --opacity_reset_interval + # How frequently to reset opacity, 3_000 by default. + + # --lambda_dssim + # Influence of SSIM on total loss from 0 to 1, 0.2 by default. + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[0], + group="" + ) + ] + + outputs = [ + desc.File( + name='outputFolder', + label='OutputFolder', + description='Output folder.', + value=os.path.join(desc.Node.internalFolder, "output"), + uid=[], + group="", + ), + desc.File( + name='meshPreview', + label='meshPreview', + description='meshPreview', + value=os.path.join(desc.Node.internalFolder, "preview_mesh.ply"), + uid=[], + group="", + ) + ] + + def processChunk(self, chunk): + #copy input data to node's folder (we mount only this folder) + input_folder = os.path.join(chunk.node.internalFolder, 'input') + output_folder = os.path.join(chunk.node.internalFolder, 'output') + os.makedirs(input_folder) + os.makedirs(output_folder) + copy_tree(os.path.join(chunk.node.inputColmapFolder.value,'sparse'), os.path.join(input_folder, 'sparse', '0')) + copy_tree(os.path.join(chunk.node.inputColmapFolder.value,'images'), os.path.join(input_folder, 'images')) + #run the process + super().processChunk(chunk) + + # create 3D display + output_mesh = os.path.join(output_folder, "point_cloud", "iteration_%d"%chunk.node.iterations.value, "point_cloud.ply") + + import trimesh + from trimesh.transformations import compose_matrix + from trimesh.creation import icosphere + gaussians = load_gs_ply(output_mesh) + + meshes = [] + for i, (c, r, s, o, fdc, fe) in enumerate(zip(*gaussians)): + if not (i%10==0): + continue + print("%d/%d"%(i, gaussians[0].shape[0])) + rgb=np.squeeze(rgb_from_sh(fdc)) + rgba=255*np.concatenate([rgb,o]) + unit_sphere = icosphere(subdivisions=1) + unit_sphere.visual.face_colors[:] = np.array(rgba) + unit_sphere.visual.vertex_colors[:] = np.array(rgba) + transform = compose_matrix(scale=s, angles=r, translate=c) + unit_sphere.apply_transform(transform) + meshes.append(unit_sphere) + preview_mesh = trimesh.util.concatenate(meshes) + preview_mesh.export(chunk.node.meshPreview.value) diff --git a/mrrs/gaussian_splatting/GaussianSplattingRender.py b/mrrs/gaussian_splatting/GaussianSplattingRender.py new file mode 100644 index 0000000..5f735de --- /dev/null +++ b/mrrs/gaussian_splatting/GaussianSplattingRender.py @@ -0,0 +1,71 @@ + +import os +from meshroom.core import desc +from meshroom.core.plugin import 
DockerNode +from distutils.dir_util import copy_tree +from shutil import move + +class GaussianSplattingRender(DockerNode): + + category = 'GaussianSplatting' + documentation = '''Node to render frames from a .sfm and the optimised gaussian splats.''' + gpu = desc.Level.INTENSIVE + + commandLine = "python gaussian-splatting/render.py -s /node_folder/input_scene -m /node_folder/input_model" + + envFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Dockerfile')) + + inputs = [ + desc.File( + name="inputModelFolder", + label="inputModelFolder", + description="inputModelFolder", + value="", + uid=[0], + group="" + ), + desc.File( + name="inputColmapFolder", + label="inputColmapFolder", + description="inputColmapFolder", + value="", + uid=[0], + group="" + ), + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[0], + ) + ] + + outputs = [ + desc.File( + name='outputFolder', + label='OutputFolder', + description='Output folder.', + value=os.path.join(desc.Node.internalFolder, "output"), + uid=[], + group="", + ), + ] + + def processChunk(self, chunk): + #copy input data to node's folder (we mount only this folder) + input_folder = os.path.join(chunk.node.internalFolder, 'input_scene') + input_folder_model = os.path.join(chunk.node.internalFolder, 'input_model') + output_folder = os.path.join(chunk.node.internalFolder, 'output') + os.makedirs(input_folder) + os.makedirs(input_folder_model) + os.makedirs(output_folder) + + #copy colmap scene and pretrained model + copy_tree(os.path.join(chunk.node.inputColmapFolder.value,'sparse'), os.path.join(input_folder, 'sparse', '0')) + copy_tree(os.path.join(chunk.node.inputColmapFolder.value,'images'), os.path.join(input_folder, 'images')) + copy_tree(chunk.node.inputModelFolder.value, input_folder_model) + + super().processChunk(chunk) \ No newline at end of file diff --git a/mrrs/gaussian_splatting/__init__.py b/mrrs/gaussian_splatting/__init__.py new file mode 100644 index 0000000..e69de29 From a959c64f0e6a6bc5dcebdc7b8aef645e2c8dc11a Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 29 May 2024 16:49:09 +0200 Subject: [PATCH 02/32] modified mvsnet for plugin --- .gitmodules | 3 +++ .../vismvsnet => deep_depth_map}/Vis-MVSNet | 0 .../{nodes/depth_map => deep_depth_map}/VizMVSNet.py | 12 ++++++------ mrrs/{nodes/depth_map => deep_depth_map}/__init__.py | 0 .../vismvsnet => deep_depth_map}/env.yaml | 0 mrrs/depth_maps/vismvsnet/__init__.py | 4 ---- 6 files changed, 9 insertions(+), 10 deletions(-) rename mrrs/{depth_maps/vismvsnet => deep_depth_map}/Vis-MVSNet (100%) rename mrrs/{nodes/depth_map => deep_depth_map}/VizMVSNet.py (97%) rename mrrs/{nodes/depth_map => deep_depth_map}/__init__.py (100%) rename mrrs/{depth_maps/vismvsnet => deep_depth_map}/env.yaml (100%) delete mode 100644 mrrs/depth_maps/vismvsnet/__init__.py diff --git a/.gitmodules b/.gitmodules index 36923da..81e1da5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,6 @@ [submodule "mrrs/depth_maps/vismvsnet/Vis-MVSNet"] path = mrrs/depth_maps/vismvsnet/Vis-MVSNet url = https://github.com/jzhangbs/Vis-MVSNet +[submodule "mrrs/deep_depth_map/Vis-MVSNet"] + path = mrrs/deep_depth_map/Vis-MVSNet + url = https://github.com/jzhangbs/Vis-MVSNet diff --git a/mrrs/depth_maps/vismvsnet/Vis-MVSNet b/mrrs/deep_depth_map/Vis-MVSNet similarity index 100% rename from 
mrrs/depth_maps/vismvsnet/Vis-MVSNet rename to mrrs/deep_depth_map/Vis-MVSNet diff --git a/mrrs/nodes/depth_map/VizMVSNet.py b/mrrs/deep_depth_map/VizMVSNet.py similarity index 97% rename from mrrs/nodes/depth_map/VizMVSNet.py rename to mrrs/deep_depth_map/VizMVSNet.py index 8d5cd02..c175555 100644 --- a/mrrs/nodes/depth_map/VizMVSNet.py +++ b/mrrs/deep_depth_map/VizMVSNet.py @@ -4,11 +4,13 @@ import cv2 from meshroom.core import desc +from meshroom.core.plugin import CondaNode + from mrrs.core.ios import matrices_from_sfm_data, open_depth_map, save_exr -from mrrs.core.CondaNode import CondaNode from mrrs.core.utils import format_float_array -from mrrs.depth_maps.vismvsnet import ENV_FILE, EXEC, MODEL_PATH +EXEC = "python "+ os.path.join(os.path.dirname(__file__), "Vis-MVSNet/test.py") +MODEL_PATH = os.path.join(os.path.dirname(__file__), "Vis-MVSNet/pretrained_model/vis") # parser.add_argument('--data_root', type=str, help='The root dir of the data.') # parser.add_argument('--dataset_name', type=str, default='tanksandtemples', help='The name of the dataset. Should be identical to the dataloader source file. e.g. blended refers to data/blended.py.') @@ -34,7 +36,7 @@ class VizMVSNet(CondaNode): - category = 'Meshroom Research' + category = 'VizMVSNet' documentation = ''' ''' gpu = desc.Level.INTENSIVE @@ -43,9 +45,7 @@ class VizMVSNet(CondaNode): #overides the env path - @property - def env_file(self): - return ENV_FILE + envfile = os.path.join(os.path.dirname(__file__), 'env.yaml') inputs = [ desc.File( diff --git a/mrrs/nodes/depth_map/__init__.py b/mrrs/deep_depth_map/__init__.py similarity index 100% rename from mrrs/nodes/depth_map/__init__.py rename to mrrs/deep_depth_map/__init__.py diff --git a/mrrs/depth_maps/vismvsnet/env.yaml b/mrrs/deep_depth_map/env.yaml similarity index 100% rename from mrrs/depth_maps/vismvsnet/env.yaml rename to mrrs/deep_depth_map/env.yaml diff --git a/mrrs/depth_maps/vismvsnet/__init__.py b/mrrs/depth_maps/vismvsnet/__init__.py deleted file mode 100644 index f743c2b..0000000 --- a/mrrs/depth_maps/vismvsnet/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -import os -ENV_FILE = os.path.join(os.path.dirname(__file__), 'env.yaml') -EXEC = "python "+ os.path.join(os.path.dirname(__file__), "Vis-MVSNet/test.py") -MODEL_PATH = os.path.join(os.path.dirname(__file__), "Vis-MVSNet/pretrained_model/vis") From 903e59126552a0d17a5bf21449656b46eb81c04e Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 29 May 2024 17:14:41 +0200 Subject: [PATCH 03/32] massive reorganisation --- meshrooPlugin.json | 4 + .../CalibrationComparison.py | 0 .../benchmark => 3DR_benchmark}/CleanMesh.py | 0 .../DepthMapComparison.py | 0 .../LoadDataset.py | 0 .../MeshComparison.py | 0 mrrs/{metrics => 3DR_benchmark}/__init__.py | 0 mrrs/{ => 3DR_benchmark}/datasets/__init__.py | 0 mrrs/{ => 3DR_benchmark}/datasets/alab.py | 0 mrrs/{ => 3DR_benchmark}/datasets/baptiste.py | 0 .../datasets/blendedMVG.py | 0 mrrs/{ => 3DR_benchmark}/datasets/dtu.py | 0 mrrs/{ => 3DR_benchmark}/datasets/eth3d.py | 0 mrrs/{ => 3DR_benchmark}/datasets/nerf.py | 0 .../datasets/reality_capture.py | 0 .../metrics}/__init__.py | 0 .../metrics/chamfer_distance/__init__.py | 0 .../metrics/chamfer_distance/clean_mesh.py | 0 .../chamfer_distance/environnement.yaml | 0 .../metrics/chamfer_distance/eval_pcd.py | 0 .../chamfer_distance/make_gt_surface.py | 0 .../remove_invisible_faces.py | 0 .../metrics/chamfer_distance/utils.py | 0 .../metrics/dtu/__init__.py | 0 .../metrics/dtu/dtu_eval.py | 0 
.../metrics/dtu/environnement.yaml | 0 mrrs/{ => 3DR_benchmark}/metrics/metrics.py | 0 mrrs/{nodes => }/README.md | 0 .../render/CreateTrackingMarkers.py | 0 mrrs/{nodes => blender}/render/Render360.py | 0 mrrs/{nodes => blender}/render/RenderMesh.py | 0 .../render/RenderOverlay.py | 0 .../render/SyntheticDataset.py | 0 mrrs/{nodes => blender}/render/__init__.py | 0 .../colmap/AutomaticReconstructor.py | 0 .../colmap/Colmap2MeshroomSfmConvertions.py | 0 mrrs/{nodes => }/colmap/DelaunayMesher.py | 0 mrrs/{nodes => }/colmap/FeatureExtraction.py | 0 mrrs/{nodes => }/colmap/FeatureMatching.py | 0 mrrs/{nodes => }/colmap/ImageUndistorder.py | 0 .../colmap/ImportColmapDepthMaps.py | 0 mrrs/{nodes => }/colmap/Mapper.py | 0 .../colmap/Meshroom2ColmapSfmConvertions.py | 0 mrrs/{nodes => }/colmap/PatchMatchStereo.py | 0 mrrs/{nodes => }/colmap/PoissonMesher.py | 0 mrrs/{nodes => }/colmap/StereoFusion.py | 0 mrrs/{nodes => }/colmap/__init__.py | 0 mrrs/core/CondaNode.py | 77 ------------------- mrrs/{nodes => }/nerf/nerfstudio.py | 0 mrrs/{nodes => }/nerf/nerfstudio_export.py | 0 .../stereo_photometry/Uni_MS_PS/__init__.py | 0 mrrs/nodes/utils/__init__.py | 0 mrrs/{nodes => }/reality_capture/ExportXMP.py | 0 mrrs/{nodes => }/reality_capture/ImportXMP.py | 0 .../benchmark => reality_capture}/__init__.py | 0 .../stereo_photometry/MS_PS/MPS_NET.py | 0 .../stereo_photometry/MS_PS/MS_PS.py | 0 .../stereo_photometry/MS_PS/NENet.py | 0 .../MS_PS}/__init__.py | 0 .../stereo_photometry/MS_PS/conv.py | 0 .../MS_PS/feature_extractor.py | 0 .../stereo_photometry/MS_PS/launch.py | 0 .../stereo_photometry/MS_PS/utils.py | 0 .../stereo_photometry/MS_PS/weight/.gitkeep | 0 .../stereo_photometry/MS_PS/weight_url.txt | 0 .../Uni_MS_PS/Transformer_8.py | 0 .../Uni_MS_PS/Transformer_8_layer.py | 0 .../Uni_MS_PS/Transformer_multi_res_7.py | 0 .../stereo_photometry/Uni_MS_PS/Uni_MS_PS.py | 0 .../Uni_MS_PS}/__init__.py | 0 .../stereo_photometry/Uni_MS_PS/deppading.py | 0 .../Uni_MS_PS/inference_file.py | 0 .../stereo_photometry/Uni_MS_PS/launch.py | 0 .../stereo_photometry/Uni_MS_PS/run.py | 0 .../Uni_MS_PS/transformer_modules.py | 0 .../stereo_photometry/Uni_MS_PS/utils.py | 0 .../Uni_MS_PS/utils_process.py | 0 .../Uni_MS_PS/weight/.gitkeep | 0 .../Uni_MS_PS/weight_url.txt | 0 mrrs/{nodes => }/utils/CalibTransform.py | 0 mrrs/{nodes => }/utils/ComputeNormals.py | 0 mrrs/{nodes => }/utils/ConvertImages.py | 0 mrrs/{nodes => }/utils/CopyData.py | 0 mrrs/{nodes => }/utils/CutSfm.py | 0 mrrs/{nodes => }/utils/DepthMapTransform.py | 0 mrrs/{nodes => }/utils/ExecuteCmdConda.py | 0 mrrs/{nodes => }/utils/InjectSfmData.py | 0 mrrs/{nodes => }/utils/MeshTransform.py | 0 mrrs/{nodes => }/utils/Seq2Video.py | 0 .../MS_PS => utils}/__init__.py | 0 90 files changed, 4 insertions(+), 77 deletions(-) rename mrrs/{nodes/benchmark => 3DR_benchmark}/CalibrationComparison.py (100%) rename mrrs/{nodes/benchmark => 3DR_benchmark}/CleanMesh.py (100%) rename mrrs/{nodes/benchmark => 3DR_benchmark}/DepthMapComparison.py (100%) rename mrrs/{nodes/benchmark => 3DR_benchmark}/LoadDataset.py (100%) rename mrrs/{nodes/benchmark => 3DR_benchmark}/MeshComparison.py (100%) rename mrrs/{metrics => 3DR_benchmark}/__init__.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/__init__.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/alab.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/baptiste.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/blendedMVG.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/dtu.py (100%) rename mrrs/{ => 
3DR_benchmark}/datasets/eth3d.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/nerf.py (100%) rename mrrs/{ => 3DR_benchmark}/datasets/reality_capture.py (100%) rename mrrs/{nodes => 3DR_benchmark/metrics}/__init__.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/__init__.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/clean_mesh.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/environnement.yaml (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/eval_pcd.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/make_gt_surface.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/remove_invisible_faces.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/chamfer_distance/utils.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/dtu/__init__.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/dtu/dtu_eval.py (100%) rename mrrs/{ => 3DR_benchmark}/metrics/dtu/environnement.yaml (100%) rename mrrs/{ => 3DR_benchmark}/metrics/metrics.py (100%) rename mrrs/{nodes => }/README.md (100%) rename mrrs/{nodes => blender}/render/CreateTrackingMarkers.py (100%) rename mrrs/{nodes => blender}/render/Render360.py (100%) rename mrrs/{nodes => blender}/render/RenderMesh.py (100%) rename mrrs/{nodes => blender}/render/RenderOverlay.py (100%) rename mrrs/{nodes => blender}/render/SyntheticDataset.py (100%) rename mrrs/{nodes => blender}/render/__init__.py (100%) rename mrrs/{nodes => }/colmap/AutomaticReconstructor.py (100%) rename mrrs/{nodes => }/colmap/Colmap2MeshroomSfmConvertions.py (100%) rename mrrs/{nodes => }/colmap/DelaunayMesher.py (100%) rename mrrs/{nodes => }/colmap/FeatureExtraction.py (100%) rename mrrs/{nodes => }/colmap/FeatureMatching.py (100%) rename mrrs/{nodes => }/colmap/ImageUndistorder.py (100%) rename mrrs/{nodes => }/colmap/ImportColmapDepthMaps.py (100%) rename mrrs/{nodes => }/colmap/Mapper.py (100%) rename mrrs/{nodes => }/colmap/Meshroom2ColmapSfmConvertions.py (100%) rename mrrs/{nodes => }/colmap/PatchMatchStereo.py (100%) rename mrrs/{nodes => }/colmap/PoissonMesher.py (100%) rename mrrs/{nodes => }/colmap/StereoFusion.py (100%) rename mrrs/{nodes => }/colmap/__init__.py (100%) delete mode 100644 mrrs/core/CondaNode.py rename mrrs/{nodes => }/nerf/nerfstudio.py (100%) rename mrrs/{nodes => }/nerf/nerfstudio_export.py (100%) delete mode 100644 mrrs/nodes/stereo_photometry/Uni_MS_PS/__init__.py delete mode 100644 mrrs/nodes/utils/__init__.py rename mrrs/{nodes => }/reality_capture/ExportXMP.py (100%) rename mrrs/{nodes => }/reality_capture/ImportXMP.py (100%) rename mrrs/{nodes/benchmark => reality_capture}/__init__.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/MPS_NET.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/MS_PS.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/NENet.py (100%) rename mrrs/{nodes/nerf => stereo_photometry/MS_PS}/__init__.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/conv.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/feature_extractor.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/launch.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/utils.py (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/weight/.gitkeep (100%) rename mrrs/{nodes => }/stereo_photometry/MS_PS/weight_url.txt (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/Transformer_8.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/Transformer_8_layer.py (100%) rename mrrs/{nodes => 
}/stereo_photometry/Uni_MS_PS/Transformer_multi_res_7.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/Uni_MS_PS.py (100%) rename mrrs/{nodes/reality_capture => stereo_photometry/Uni_MS_PS}/__init__.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/deppading.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/inference_file.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/launch.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/run.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/transformer_modules.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/utils.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/utils_process.py (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/weight/.gitkeep (100%) rename mrrs/{nodes => }/stereo_photometry/Uni_MS_PS/weight_url.txt (100%) rename mrrs/{nodes => }/utils/CalibTransform.py (100%) rename mrrs/{nodes => }/utils/ComputeNormals.py (100%) rename mrrs/{nodes => }/utils/ConvertImages.py (100%) rename mrrs/{nodes => }/utils/CopyData.py (100%) rename mrrs/{nodes => }/utils/CutSfm.py (100%) rename mrrs/{nodes => }/utils/DepthMapTransform.py (100%) rename mrrs/{nodes => }/utils/ExecuteCmdConda.py (100%) rename mrrs/{nodes => }/utils/InjectSfmData.py (100%) rename mrrs/{nodes => }/utils/MeshTransform.py (100%) rename mrrs/{nodes => }/utils/Seq2Video.py (100%) rename mrrs/{nodes/stereo_photometry/MS_PS => utils}/__init__.py (100%) diff --git a/meshrooPlugin.json b/meshrooPlugin.json index 2260529..b0951d2 100644 --- a/meshrooPlugin.json +++ b/meshrooPlugin.json @@ -1,5 +1,9 @@ [ { + "pluginName":"3DR Benchmark", + "nodesFolder":"3DR_benchmark" + }, + { "pluginName":"Gaussian Splatting", "nodesFolder":"gaussian_splat" }, diff --git a/mrrs/nodes/benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py similarity index 100% rename from mrrs/nodes/benchmark/CalibrationComparison.py rename to mrrs/3DR_benchmark/CalibrationComparison.py diff --git a/mrrs/nodes/benchmark/CleanMesh.py b/mrrs/3DR_benchmark/CleanMesh.py similarity index 100% rename from mrrs/nodes/benchmark/CleanMesh.py rename to mrrs/3DR_benchmark/CleanMesh.py diff --git a/mrrs/nodes/benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py similarity index 100% rename from mrrs/nodes/benchmark/DepthMapComparison.py rename to mrrs/3DR_benchmark/DepthMapComparison.py diff --git a/mrrs/nodes/benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py similarity index 100% rename from mrrs/nodes/benchmark/LoadDataset.py rename to mrrs/3DR_benchmark/LoadDataset.py diff --git a/mrrs/nodes/benchmark/MeshComparison.py b/mrrs/3DR_benchmark/MeshComparison.py similarity index 100% rename from mrrs/nodes/benchmark/MeshComparison.py rename to mrrs/3DR_benchmark/MeshComparison.py diff --git a/mrrs/metrics/__init__.py b/mrrs/3DR_benchmark/__init__.py similarity index 100% rename from mrrs/metrics/__init__.py rename to mrrs/3DR_benchmark/__init__.py diff --git a/mrrs/datasets/__init__.py b/mrrs/3DR_benchmark/datasets/__init__.py similarity index 100% rename from mrrs/datasets/__init__.py rename to mrrs/3DR_benchmark/datasets/__init__.py diff --git a/mrrs/datasets/alab.py b/mrrs/3DR_benchmark/datasets/alab.py similarity index 100% rename from mrrs/datasets/alab.py rename to mrrs/3DR_benchmark/datasets/alab.py diff --git a/mrrs/datasets/baptiste.py b/mrrs/3DR_benchmark/datasets/baptiste.py similarity index 100% rename from mrrs/datasets/baptiste.py rename to 
mrrs/3DR_benchmark/datasets/baptiste.py diff --git a/mrrs/datasets/blendedMVG.py b/mrrs/3DR_benchmark/datasets/blendedMVG.py similarity index 100% rename from mrrs/datasets/blendedMVG.py rename to mrrs/3DR_benchmark/datasets/blendedMVG.py diff --git a/mrrs/datasets/dtu.py b/mrrs/3DR_benchmark/datasets/dtu.py similarity index 100% rename from mrrs/datasets/dtu.py rename to mrrs/3DR_benchmark/datasets/dtu.py diff --git a/mrrs/datasets/eth3d.py b/mrrs/3DR_benchmark/datasets/eth3d.py similarity index 100% rename from mrrs/datasets/eth3d.py rename to mrrs/3DR_benchmark/datasets/eth3d.py diff --git a/mrrs/datasets/nerf.py b/mrrs/3DR_benchmark/datasets/nerf.py similarity index 100% rename from mrrs/datasets/nerf.py rename to mrrs/3DR_benchmark/datasets/nerf.py diff --git a/mrrs/datasets/reality_capture.py b/mrrs/3DR_benchmark/datasets/reality_capture.py similarity index 100% rename from mrrs/datasets/reality_capture.py rename to mrrs/3DR_benchmark/datasets/reality_capture.py diff --git a/mrrs/nodes/__init__.py b/mrrs/3DR_benchmark/metrics/__init__.py similarity index 100% rename from mrrs/nodes/__init__.py rename to mrrs/3DR_benchmark/metrics/__init__.py diff --git a/mrrs/metrics/chamfer_distance/__init__.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/__init__.py similarity index 100% rename from mrrs/metrics/chamfer_distance/__init__.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/__init__.py diff --git a/mrrs/metrics/chamfer_distance/clean_mesh.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py similarity index 100% rename from mrrs/metrics/chamfer_distance/clean_mesh.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py diff --git a/mrrs/metrics/chamfer_distance/environnement.yaml b/mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml similarity index 100% rename from mrrs/metrics/chamfer_distance/environnement.yaml rename to mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml diff --git a/mrrs/metrics/chamfer_distance/eval_pcd.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/eval_pcd.py similarity index 100% rename from mrrs/metrics/chamfer_distance/eval_pcd.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/eval_pcd.py diff --git a/mrrs/metrics/chamfer_distance/make_gt_surface.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/make_gt_surface.py similarity index 100% rename from mrrs/metrics/chamfer_distance/make_gt_surface.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/make_gt_surface.py diff --git a/mrrs/metrics/chamfer_distance/remove_invisible_faces.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py similarity index 100% rename from mrrs/metrics/chamfer_distance/remove_invisible_faces.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py diff --git a/mrrs/metrics/chamfer_distance/utils.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/utils.py similarity index 100% rename from mrrs/metrics/chamfer_distance/utils.py rename to mrrs/3DR_benchmark/metrics/chamfer_distance/utils.py diff --git a/mrrs/metrics/dtu/__init__.py b/mrrs/3DR_benchmark/metrics/dtu/__init__.py similarity index 100% rename from mrrs/metrics/dtu/__init__.py rename to mrrs/3DR_benchmark/metrics/dtu/__init__.py diff --git a/mrrs/metrics/dtu/dtu_eval.py b/mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py similarity index 100% rename from mrrs/metrics/dtu/dtu_eval.py rename to mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py diff --git a/mrrs/metrics/dtu/environnement.yaml 
b/mrrs/3DR_benchmark/metrics/dtu/environnement.yaml similarity index 100% rename from mrrs/metrics/dtu/environnement.yaml rename to mrrs/3DR_benchmark/metrics/dtu/environnement.yaml diff --git a/mrrs/metrics/metrics.py b/mrrs/3DR_benchmark/metrics/metrics.py similarity index 100% rename from mrrs/metrics/metrics.py rename to mrrs/3DR_benchmark/metrics/metrics.py diff --git a/mrrs/nodes/README.md b/mrrs/README.md similarity index 100% rename from mrrs/nodes/README.md rename to mrrs/README.md diff --git a/mrrs/nodes/render/CreateTrackingMarkers.py b/mrrs/blender/render/CreateTrackingMarkers.py similarity index 100% rename from mrrs/nodes/render/CreateTrackingMarkers.py rename to mrrs/blender/render/CreateTrackingMarkers.py diff --git a/mrrs/nodes/render/Render360.py b/mrrs/blender/render/Render360.py similarity index 100% rename from mrrs/nodes/render/Render360.py rename to mrrs/blender/render/Render360.py diff --git a/mrrs/nodes/render/RenderMesh.py b/mrrs/blender/render/RenderMesh.py similarity index 100% rename from mrrs/nodes/render/RenderMesh.py rename to mrrs/blender/render/RenderMesh.py diff --git a/mrrs/nodes/render/RenderOverlay.py b/mrrs/blender/render/RenderOverlay.py similarity index 100% rename from mrrs/nodes/render/RenderOverlay.py rename to mrrs/blender/render/RenderOverlay.py diff --git a/mrrs/nodes/render/SyntheticDataset.py b/mrrs/blender/render/SyntheticDataset.py similarity index 100% rename from mrrs/nodes/render/SyntheticDataset.py rename to mrrs/blender/render/SyntheticDataset.py diff --git a/mrrs/nodes/render/__init__.py b/mrrs/blender/render/__init__.py similarity index 100% rename from mrrs/nodes/render/__init__.py rename to mrrs/blender/render/__init__.py diff --git a/mrrs/nodes/colmap/AutomaticReconstructor.py b/mrrs/colmap/AutomaticReconstructor.py similarity index 100% rename from mrrs/nodes/colmap/AutomaticReconstructor.py rename to mrrs/colmap/AutomaticReconstructor.py diff --git a/mrrs/nodes/colmap/Colmap2MeshroomSfmConvertions.py b/mrrs/colmap/Colmap2MeshroomSfmConvertions.py similarity index 100% rename from mrrs/nodes/colmap/Colmap2MeshroomSfmConvertions.py rename to mrrs/colmap/Colmap2MeshroomSfmConvertions.py diff --git a/mrrs/nodes/colmap/DelaunayMesher.py b/mrrs/colmap/DelaunayMesher.py similarity index 100% rename from mrrs/nodes/colmap/DelaunayMesher.py rename to mrrs/colmap/DelaunayMesher.py diff --git a/mrrs/nodes/colmap/FeatureExtraction.py b/mrrs/colmap/FeatureExtraction.py similarity index 100% rename from mrrs/nodes/colmap/FeatureExtraction.py rename to mrrs/colmap/FeatureExtraction.py diff --git a/mrrs/nodes/colmap/FeatureMatching.py b/mrrs/colmap/FeatureMatching.py similarity index 100% rename from mrrs/nodes/colmap/FeatureMatching.py rename to mrrs/colmap/FeatureMatching.py diff --git a/mrrs/nodes/colmap/ImageUndistorder.py b/mrrs/colmap/ImageUndistorder.py similarity index 100% rename from mrrs/nodes/colmap/ImageUndistorder.py rename to mrrs/colmap/ImageUndistorder.py diff --git a/mrrs/nodes/colmap/ImportColmapDepthMaps.py b/mrrs/colmap/ImportColmapDepthMaps.py similarity index 100% rename from mrrs/nodes/colmap/ImportColmapDepthMaps.py rename to mrrs/colmap/ImportColmapDepthMaps.py diff --git a/mrrs/nodes/colmap/Mapper.py b/mrrs/colmap/Mapper.py similarity index 100% rename from mrrs/nodes/colmap/Mapper.py rename to mrrs/colmap/Mapper.py diff --git a/mrrs/nodes/colmap/Meshroom2ColmapSfmConvertions.py b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py similarity index 100% rename from mrrs/nodes/colmap/Meshroom2ColmapSfmConvertions.py rename 
to mrrs/colmap/Meshroom2ColmapSfmConvertions.py diff --git a/mrrs/nodes/colmap/PatchMatchStereo.py b/mrrs/colmap/PatchMatchStereo.py similarity index 100% rename from mrrs/nodes/colmap/PatchMatchStereo.py rename to mrrs/colmap/PatchMatchStereo.py diff --git a/mrrs/nodes/colmap/PoissonMesher.py b/mrrs/colmap/PoissonMesher.py similarity index 100% rename from mrrs/nodes/colmap/PoissonMesher.py rename to mrrs/colmap/PoissonMesher.py diff --git a/mrrs/nodes/colmap/StereoFusion.py b/mrrs/colmap/StereoFusion.py similarity index 100% rename from mrrs/nodes/colmap/StereoFusion.py rename to mrrs/colmap/StereoFusion.py diff --git a/mrrs/nodes/colmap/__init__.py b/mrrs/colmap/__init__.py similarity index 100% rename from mrrs/nodes/colmap/__init__.py rename to mrrs/colmap/__init__.py diff --git a/mrrs/core/CondaNode.py b/mrrs/core/CondaNode.py deleted file mode 100644 index 6049c5b..0000000 --- a/mrrs/core/CondaNode.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -This class defines a node made to build and call conda before running a CL node. -Conda needs to be installed and callable via "conda" -""" - -import os -from meshroom.core import desc -from meshroom.core import defaultCacheFolder - -#TODO: add mode to not run as CLI to be able to debug -class CondaNode(desc.CommandLineNode): - # def __init__(self): - # super().__init__() #TODO check if conda to path - - """path to yaml file""" - env_file = None - - """path to the conda env, will be initialised if not existing""" - env_path = None - - def curate_env_command(self): - """ - Used to unset all rez defined env that messes up with conda. - """ - cmd="" - for env_var in os.environ.keys(): - if ((("py" in env_var) or ("PY" in env_var)) - and ("REZ" not in env_var) and ("." not in env_var) and ("-" not in env_var)): - if env_var.endswith("()"):#function get special treatment - cmd+='unset -f '+env_var[10:-2]+'; ' - else: - cmd+='unset '+env_var+'; ' - return cmd - - def buildCommandLine(self, chunk): - cmdPrefix = '' - #create the env in the folder above the node - if self.env_path is None: - env_path=os.path.join(defaultCacheFolder, "env_"+self.__class__.__name__)#env name from class - else: - env_path=self.env_path - if not os.path.exists(env_path): - chunk.logger.info("Creating conda env in "+env_path) - if not os.path.exists(self.env_file): - raise RuntimeError('No yaml file found.') - make_env_command = self.curate_env_command()+" conda config --set channel_priority strict; "+" conda env create --prefix {env_path} --file {env_file}".format(env_path=env_path, env_file=self.env_file) - print("Building env") - print(make_env_command) - os.system(make_env_command) - #add the prefix to the command line - cmdPrefix = self.curate_env_command()+' conda run --no-capture-output -p {env_path} '.format(env_path=env_path) - cmdSuffix = '' - if chunk.node.isParallelized and chunk.node.size > 1: - cmdSuffix = ' ' + self.commandLineRange.format(**chunk.range.toDict()) - return cmdPrefix + chunk.node.nodeDesc.commandLine.format(**chunk.node._cmdVars) + cmdSuffix - - def processChunk(self, chunk): - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - with open(chunk.logFile, 'w') as logF: - cmd = self.buildCommandLine(chunk) - chunk.status.commandLine = cmd - chunk.saveStatusFile() - print(' - commandLine: {}'.format(cmd)) - print(' - logFile: {}'.format(chunk.logFile)) - #unset doesnt work with subprocess, and removing the variables from the env dict does not work either - chunk.status.returnCode = os.system(cmd) - logContent="" - - if chunk.status.returnCode 
!= 0: - with open(chunk.logFile, 'r') as logF: - logContent = ''.join(logF.readlines()) - raise RuntimeError('Error on node "{}":\nLog:\n{}'.format(chunk.name, logContent)) - except: - chunk.logManager.end() - raise - chunk.logManager.end() \ No newline at end of file diff --git a/mrrs/nodes/nerf/nerfstudio.py b/mrrs/nerf/nerfstudio.py similarity index 100% rename from mrrs/nodes/nerf/nerfstudio.py rename to mrrs/nerf/nerfstudio.py diff --git a/mrrs/nodes/nerf/nerfstudio_export.py b/mrrs/nerf/nerfstudio_export.py similarity index 100% rename from mrrs/nodes/nerf/nerfstudio_export.py rename to mrrs/nerf/nerfstudio_export.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/__init__.py b/mrrs/nodes/stereo_photometry/Uni_MS_PS/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/mrrs/nodes/utils/__init__.py b/mrrs/nodes/utils/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/mrrs/nodes/reality_capture/ExportXMP.py b/mrrs/reality_capture/ExportXMP.py similarity index 100% rename from mrrs/nodes/reality_capture/ExportXMP.py rename to mrrs/reality_capture/ExportXMP.py diff --git a/mrrs/nodes/reality_capture/ImportXMP.py b/mrrs/reality_capture/ImportXMP.py similarity index 100% rename from mrrs/nodes/reality_capture/ImportXMP.py rename to mrrs/reality_capture/ImportXMP.py diff --git a/mrrs/nodes/benchmark/__init__.py b/mrrs/reality_capture/__init__.py similarity index 100% rename from mrrs/nodes/benchmark/__init__.py rename to mrrs/reality_capture/__init__.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/MPS_NET.py b/mrrs/stereo_photometry/MS_PS/MPS_NET.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/MPS_NET.py rename to mrrs/stereo_photometry/MS_PS/MPS_NET.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/MS_PS.py b/mrrs/stereo_photometry/MS_PS/MS_PS.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/MS_PS.py rename to mrrs/stereo_photometry/MS_PS/MS_PS.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/NENet.py b/mrrs/stereo_photometry/MS_PS/NENet.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/NENet.py rename to mrrs/stereo_photometry/MS_PS/NENet.py diff --git a/mrrs/nodes/nerf/__init__.py b/mrrs/stereo_photometry/MS_PS/__init__.py similarity index 100% rename from mrrs/nodes/nerf/__init__.py rename to mrrs/stereo_photometry/MS_PS/__init__.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/conv.py b/mrrs/stereo_photometry/MS_PS/conv.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/conv.py rename to mrrs/stereo_photometry/MS_PS/conv.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/feature_extractor.py b/mrrs/stereo_photometry/MS_PS/feature_extractor.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/feature_extractor.py rename to mrrs/stereo_photometry/MS_PS/feature_extractor.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/launch.py b/mrrs/stereo_photometry/MS_PS/launch.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/launch.py rename to mrrs/stereo_photometry/MS_PS/launch.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/utils.py b/mrrs/stereo_photometry/MS_PS/utils.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/utils.py rename to mrrs/stereo_photometry/MS_PS/utils.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/weight/.gitkeep b/mrrs/stereo_photometry/MS_PS/weight/.gitkeep similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/weight/.gitkeep rename to 
mrrs/stereo_photometry/MS_PS/weight/.gitkeep diff --git a/mrrs/nodes/stereo_photometry/MS_PS/weight_url.txt b/mrrs/stereo_photometry/MS_PS/weight_url.txt similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/weight_url.txt rename to mrrs/stereo_photometry/MS_PS/weight_url.txt diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_8.py b/mrrs/stereo_photometry/Uni_MS_PS/Transformer_8.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_8.py rename to mrrs/stereo_photometry/Uni_MS_PS/Transformer_8.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_8_layer.py b/mrrs/stereo_photometry/Uni_MS_PS/Transformer_8_layer.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_8_layer.py rename to mrrs/stereo_photometry/Uni_MS_PS/Transformer_8_layer.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_multi_res_7.py b/mrrs/stereo_photometry/Uni_MS_PS/Transformer_multi_res_7.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/Transformer_multi_res_7.py rename to mrrs/stereo_photometry/Uni_MS_PS/Transformer_multi_res_7.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/Uni_MS_PS.py b/mrrs/stereo_photometry/Uni_MS_PS/Uni_MS_PS.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/Uni_MS_PS.py rename to mrrs/stereo_photometry/Uni_MS_PS/Uni_MS_PS.py diff --git a/mrrs/nodes/reality_capture/__init__.py b/mrrs/stereo_photometry/Uni_MS_PS/__init__.py similarity index 100% rename from mrrs/nodes/reality_capture/__init__.py rename to mrrs/stereo_photometry/Uni_MS_PS/__init__.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/deppading.py b/mrrs/stereo_photometry/Uni_MS_PS/deppading.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/deppading.py rename to mrrs/stereo_photometry/Uni_MS_PS/deppading.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/inference_file.py b/mrrs/stereo_photometry/Uni_MS_PS/inference_file.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/inference_file.py rename to mrrs/stereo_photometry/Uni_MS_PS/inference_file.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/launch.py b/mrrs/stereo_photometry/Uni_MS_PS/launch.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/launch.py rename to mrrs/stereo_photometry/Uni_MS_PS/launch.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/run.py b/mrrs/stereo_photometry/Uni_MS_PS/run.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/run.py rename to mrrs/stereo_photometry/Uni_MS_PS/run.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/transformer_modules.py b/mrrs/stereo_photometry/Uni_MS_PS/transformer_modules.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/transformer_modules.py rename to mrrs/stereo_photometry/Uni_MS_PS/transformer_modules.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/utils.py b/mrrs/stereo_photometry/Uni_MS_PS/utils.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/utils.py rename to mrrs/stereo_photometry/Uni_MS_PS/utils.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/utils_process.py b/mrrs/stereo_photometry/Uni_MS_PS/utils_process.py similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/utils_process.py rename to mrrs/stereo_photometry/Uni_MS_PS/utils_process.py diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/weight/.gitkeep 
b/mrrs/stereo_photometry/Uni_MS_PS/weight/.gitkeep similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/weight/.gitkeep rename to mrrs/stereo_photometry/Uni_MS_PS/weight/.gitkeep diff --git a/mrrs/nodes/stereo_photometry/Uni_MS_PS/weight_url.txt b/mrrs/stereo_photometry/Uni_MS_PS/weight_url.txt similarity index 100% rename from mrrs/nodes/stereo_photometry/Uni_MS_PS/weight_url.txt rename to mrrs/stereo_photometry/Uni_MS_PS/weight_url.txt diff --git a/mrrs/nodes/utils/CalibTransform.py b/mrrs/utils/CalibTransform.py similarity index 100% rename from mrrs/nodes/utils/CalibTransform.py rename to mrrs/utils/CalibTransform.py diff --git a/mrrs/nodes/utils/ComputeNormals.py b/mrrs/utils/ComputeNormals.py similarity index 100% rename from mrrs/nodes/utils/ComputeNormals.py rename to mrrs/utils/ComputeNormals.py diff --git a/mrrs/nodes/utils/ConvertImages.py b/mrrs/utils/ConvertImages.py similarity index 100% rename from mrrs/nodes/utils/ConvertImages.py rename to mrrs/utils/ConvertImages.py diff --git a/mrrs/nodes/utils/CopyData.py b/mrrs/utils/CopyData.py similarity index 100% rename from mrrs/nodes/utils/CopyData.py rename to mrrs/utils/CopyData.py diff --git a/mrrs/nodes/utils/CutSfm.py b/mrrs/utils/CutSfm.py similarity index 100% rename from mrrs/nodes/utils/CutSfm.py rename to mrrs/utils/CutSfm.py diff --git a/mrrs/nodes/utils/DepthMapTransform.py b/mrrs/utils/DepthMapTransform.py similarity index 100% rename from mrrs/nodes/utils/DepthMapTransform.py rename to mrrs/utils/DepthMapTransform.py diff --git a/mrrs/nodes/utils/ExecuteCmdConda.py b/mrrs/utils/ExecuteCmdConda.py similarity index 100% rename from mrrs/nodes/utils/ExecuteCmdConda.py rename to mrrs/utils/ExecuteCmdConda.py diff --git a/mrrs/nodes/utils/InjectSfmData.py b/mrrs/utils/InjectSfmData.py similarity index 100% rename from mrrs/nodes/utils/InjectSfmData.py rename to mrrs/utils/InjectSfmData.py diff --git a/mrrs/nodes/utils/MeshTransform.py b/mrrs/utils/MeshTransform.py similarity index 100% rename from mrrs/nodes/utils/MeshTransform.py rename to mrrs/utils/MeshTransform.py diff --git a/mrrs/nodes/utils/Seq2Video.py b/mrrs/utils/Seq2Video.py similarity index 100% rename from mrrs/nodes/utils/Seq2Video.py rename to mrrs/utils/Seq2Video.py diff --git a/mrrs/nodes/stereo_photometry/MS_PS/__init__.py b/mrrs/utils/__init__.py similarity index 100% rename from mrrs/nodes/stereo_photometry/MS_PS/__init__.py rename to mrrs/utils/__init__.py From 0cfb4e8529e5da50259240cd1b49703c2fcb83fd Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 29 May 2024 17:47:14 +0200 Subject: [PATCH 04/32] updated meshroomPlugins.json --- meshrooPlugin.json | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/meshrooPlugin.json b/meshrooPlugin.json index b0951d2..30ec22e 100644 --- a/meshrooPlugin.json +++ b/meshrooPlugin.json @@ -1,14 +1,38 @@ [ + { + "pluginName":"MRRS Core", + "nodesFolder":"core" + }, { - "pluginName":"3DR Benchmark", - "nodesFolder":"3DR_benchmark" + "pluginName":"3DR Benchmark", + "nodesFolder":"3DR_benchmark" }, { "pluginName":"Gaussian Splatting", "nodesFolder":"gaussian_splat" }, { - "pluginName":"Dummy", - "nodesFolder":"dummy" + "pluginName":"Colmap", + "nodesFolder":"colmap" + }, + { + "pluginName":"Deep Depth Maps", + "nodesFolder":"deep_depth_map" + }, + { + "pluginName":"Nerfstudio", + "nodesFolder":"nerfstudio" + }, + { + "pluginName":"Reality Capture", + "nodesFolder":"reality_capture" + }, + { + "pluginName":"Stereo Photometry", + 
"nodesFolder":"stereo_photometry" + }, + { + "pluginName":"Utils", + "nodesFolder":"utils" } ] \ No newline at end of file From b80b9f9095e0857470896a48e3522b26c33d61a0 Mon Sep 17 00:00:00 2001 From: mhog <72275161+mh0g@users.noreply.github.com> Date: Wed, 29 May 2024 17:15:49 +0200 Subject: [PATCH 05/32] Update .gitmodules --- .gitmodules | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.gitmodules b/.gitmodules index 81e1da5..9a6b2a8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,12 +1,3 @@ -[submodule "mrrs/deep_mvs/robust_mvd/robustmvd"] - path = mrrs/deep_mvs/robust_mvd/robustmvd - url = https://github.com/lmb-freiburg/robustmvd -[submodule "mrrs/implicit_mesh/instant_ngp/instant-ngp"] - path = mrrs/implicit_mesh/instant_ngp/instant-ngp - url = https://github.com/NVlabs/instant-ngp.git -[submodule "mrrs/depth_maps/vismvsnet/Vis-MVSNet"] - path = mrrs/depth_maps/vismvsnet/Vis-MVSNet - url = https://github.com/jzhangbs/Vis-MVSNet [submodule "mrrs/deep_depth_map/Vis-MVSNet"] path = mrrs/deep_depth_map/Vis-MVSNet url = https://github.com/jzhangbs/Vis-MVSNet From 649cdfb4b79a2e7bac8610ce3795fa6b0f1428f4 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Thu, 30 May 2024 12:24:31 +0200 Subject: [PATCH 06/32] typo --- meshrooPlugin.json => meshroomPlugin.json | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename meshrooPlugin.json => meshroomPlugin.json (100%) diff --git a/meshrooPlugin.json b/meshroomPlugin.json similarity index 100% rename from meshrooPlugin.json rename to meshroomPlugin.json From 1c8d2fea48e13afa0aebed17cf6dedeeda422778 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 3 Jun 2024 11:42:27 +0200 Subject: [PATCH 07/32] updated setup.py to only install core lib, cleanup and renaming for core --- meshroomPlugin.json | 20 +- mrrs/3DR_benchmark/CalibrationComparison.py | 2 +- mrrs/3DR_benchmark/CleanMesh.py | 5 +- mrrs/3DR_benchmark/MeshComparison.py | 5 +- mrrs/3DR_benchmark/metrics/dtu/__init__.py | 2 - mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py | 426 ------------------ .../metrics/dtu/environnement.yaml | 21 - mrrs/nerf/nerfstudio.py | 2 +- mrrs/nerf/nerfstudio_export.py | 2 +- mrrs/utils/ExecuteCmdConda.py | 2 +- setup.py | 29 +- 11 files changed, 27 insertions(+), 489 deletions(-) delete mode 100644 mrrs/3DR_benchmark/metrics/dtu/__init__.py delete mode 100644 mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py delete mode 100644 mrrs/3DR_benchmark/metrics/dtu/environnement.yaml diff --git a/meshroomPlugin.json b/meshroomPlugin.json index 30ec22e..29d6f58 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -1,38 +1,38 @@ [ { - "pluginName":"MRRS Core", - "nodesFolder":"core" + "pluginName":"MRRS", + "nodesFolder":"mrrs/core" }, { "pluginName":"3DR Benchmark", - "nodesFolder":"3DR_benchmark" + "nodesFolder":"mrrs/3DR_benchmark" }, { "pluginName":"Gaussian Splatting", - "nodesFolder":"gaussian_splat" + "nodesFolder":"mrrs/gaussian_splatting" }, { "pluginName":"Colmap", - "nodesFolder":"colmap" + "nodesFolder":"mrrs/colmap" }, { "pluginName":"Deep Depth Maps", - "nodesFolder":"deep_depth_map" + "nodesFolder":"mrrs/deep_depth_map" }, { "pluginName":"Nerfstudio", - "nodesFolder":"nerfstudio" + "nodesFolder":"mrrs/nerfstudio" }, { "pluginName":"Reality Capture", - "nodesFolder":"reality_capture" + "nodesFolder":"mrrs/reality_capture" }, { "pluginName":"Stereo Photometry", - "nodesFolder":"stereo_photometry" + "nodesFolder":"mrrs/stereo_photometry" }, { "pluginName":"Utils", - "nodesFolder":"utils" + "nodesFolder":"mrrs/utils" } ] \ No newline at end of file 
diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index e08cf17..121dd78 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -9,7 +9,7 @@ from meshroom.core import desc from mrrs.core.ios import * -from mrrs.metrics.metrics import * +from .metrics.metrics import * class CalibrationComparison(desc.Node): category = 'Meshroom Research' diff --git a/mrrs/3DR_benchmark/CleanMesh.py b/mrrs/3DR_benchmark/CleanMesh.py index 24c0285..33df74a 100644 --- a/mrrs/3DR_benchmark/CleanMesh.py +++ b/mrrs/3DR_benchmark/CleanMesh.py @@ -1,8 +1,9 @@ __version__ = "1.0" import os from meshroom.core import desc -from mrrs.core.CondaNode import CondaNode -from mrrs.metrics.chamfer_distance import ENV_FILE +from meshroom.core.plugin import CondaNode + +from .metrics.chamfer_distance import ENV_FILE class CleanMesh(CondaNode): diff --git a/mrrs/3DR_benchmark/MeshComparison.py b/mrrs/3DR_benchmark/MeshComparison.py index 6795dc4..3178f38 100644 --- a/mrrs/3DR_benchmark/MeshComparison.py +++ b/mrrs/3DR_benchmark/MeshComparison.py @@ -1,8 +1,9 @@ __version__ = "1.0" import os from meshroom.core import desc -from mrrs.core.CondaNode import CondaNode -from mrrs.metrics.chamfer_distance import ENV_FILE +from meshroom.core.plugin import CondaNode + +from .metrics.chamfer_distance import ENV_FILE class MeshcomparisonBaptiste(CondaNode): diff --git a/mrrs/3DR_benchmark/metrics/dtu/__init__.py b/mrrs/3DR_benchmark/metrics/dtu/__init__.py deleted file mode 100644 index 9e1b499..0000000 --- a/mrrs/3DR_benchmark/metrics/dtu/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -import os -ENV_FILE = os.path.join(os.path.dirname(__file__), 'environnement.yaml') \ No newline at end of file diff --git a/mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py b/mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py deleted file mode 100644 index 73e20b8..0000000 --- a/mrrs/3DR_benchmark/metrics/dtu/dtu_eval.py +++ /dev/null @@ -1,426 +0,0 @@ -import os -import multiprocessing as mp -import argparse -import csv -from PIL import Image - -import numpy as np -import sklearn.neighbors as skln -from tqdm import tqdm -from scipy.io import loadmat -import json - -from skimage.morphology import binary_dilation -from skimage.draw import disk - - -mrrs_path=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) -print("mrrs path "+mrrs_path) -# FIXME: not sure why the pip install in the env does not work, maybe because of the unsets -import sys -sys.path.insert(0, mrrs_path) - -from mrrs.core.ios import matrices_from_sfm_data -from mrrs.core.geometry import camera_projection, transform_cg_cv - -# import open3d as o3d -import trimesh - -def sample_single_tri(input_): - n1, n2, v1, v2, tri_vert = input_ - c = np.mgrid[:n1+1, :n2+1] - c += 0.5 - c[0] /= max(n1, 1e-7) - c[1] /= max(n2, 1e-7) - c = np.transpose(c, (1,2,0)) - k = c[c.sum(axis=-1) < 1] # m2 - q = v1 * k[:,:1] + v2 * k[:,1:] + tri_vert - return q - -def sample_pcd(tri_vert): - v1 = tri_vert[:,1] - tri_vert[:,0] - v2 = tri_vert[:,2] - tri_vert[:,0] - l1 = np.linalg.norm(v1, axis=-1, keepdims=True) - l2 = np.linalg.norm(v2, axis=-1, keepdims=True) - area2 = np.linalg.norm(np.cross(v1, v2), axis=-1, keepdims=True) - non_zero_area = np.squeeze((area2 > 0)) - l1, l2, area2, v1, v2, tri_vert = [ - arr[non_zero_area] for arr in [l1, l2, area2, v1, v2, tri_vert] - ] - - non_zero_area = np.squeeze((area2 > 0)[:,0]) - thr = thresh * np.sqrt(l1 * l2 / area2) - n1 = np.floor(l1 / thr) - n2 = 
np.floor(l2 / thr) - - with mp.Pool() as mp_pool: - new_pts = mp_pool.map(sample_single_tri, ((n1[i,0], n2[i,0], v1[i:i+1], v2[i:i+1], tri_vert[i:i+1,0]) for i in range(len(n1))), chunksize=1024) - - new_pts = np.concatenate(new_pts, axis=0) - data_pcd = np.concatenate([vertices, new_pts], axis=0) - return data_pcd - -def write_vis_pcd(file, points, colors): - # pcd = o3d.geometry.PointCloud() - # pcd.points = o3d.utility.Vector3dVector(points) - # pcd.colors = o3d.utility.Vector3dVector(colors) - # o3d.io.write_point_cloud(file, pcd) - trimesh.points.PointCloud(points, colors).export(file) - -if __name__ == '__main__': - print("DTU Bench") - mp.freeze_support() - - parser = argparse.ArgumentParser() - #input - parser.add_argument('--data', type=str)#mesh or pouit cloud - parser.add_argument('--gt_sfm', type=str)#gt sfm - parser.add_argument('--gt_mesh', type=str)#gt mesh - #optional input - parser.add_argument('--obs_mask', default='', type=str) - parser.add_argument('--ground_plane', default='', type=str) - parser.add_argument('--mask_folder', default='', type=str) - #optional params - parser.add_argument('--eval_dir', type=str, default='.')#output dir - parser.add_argument('--mode', type=str, default='mesh', choices=['mesh', 'pcd']) - parser.add_argument('--suffix', type=str, default='') - parser.add_argument('--downsample_density', type=float, default=0.2) - parser.add_argument('--patch_size', type=float, default=60) - parser.add_argument('--max_dist', type=float, default=20) - parser.add_argument('--visualize_threshold', type=float, default=10) - #params for mask filtering - parser.add_argument("--dilatation_radius", type=int, default=12, help="Radius for mask dilatation (default: 12)") - parser.add_argument("--not_main_component", type=str, choices=["True", "False"], default=False, help="Not to keep the largest main component") - args = parser.parse_args() - - #parse the arguments from the sfm data - print("Loading sfm") - sfm_data = json.load(open(args.gt_sfm, "r")) - - thresh = args.downsample_density - if args.mode == 'mesh': - print("Mode mesh") - pbar = tqdm(total=10) - pbar.set_description('read data mesh') - data_mesh = trimesh.load(args.data)#o3d.io.read_triangle_mesh(args.data)#switch to trimesh - data_mesh.remove_unreferenced_vertices() - extrinsics_all_cams, intrinsics_all_cams, _, _, _, pixel_sizes_all_cams = matrices_from_sfm_data(sfm_data) - - ##Added filtering from masks - masks = [] - if args.mask_folder != '': - print("\nFiltering") - # Load masks - # opens masks in the same order as the input sfm - for view in sfm_data["views"]: - view_number = int(os.path.basename(view["path"]).split(".")[0]) - # masks.append(os.path.join(sfm_data["groundTruthDTU"]["obsMaskFolder"],"%03d.png"%view_number)) - masks.append(os.path.join(args.mask_folder,"%03d.png"%view_number)) - nb_images = len(masks) - - dilatation_radius = args.dilatation_radius - circle_image = np.zeros((2 * dilatation_radius - 1, 2 * dilatation_radius - 1)) - circle_image[disk((dilatation_radius - 1, dilatation_radius - 1), dilatation_radius)] = 1 - - # Clean mesh using masks and camera poses - pbar.update(1) - pbar.set_description('project points in dilated masks') - if len(intrinsics_all_cams) != nb_images: - raise RuntimeError("Nonmatching mask and intrinsic resolution %d vs %d"%(nb_images, len(intrinsics_all_cams))) - - for i in tqdm(range(nb_images)): - # Load mask image - mask_image_path = masks[i] - print("\n") - print("Opening "+mask_image_path +" view "+sfm_data["views"][i]["path"]) - mask_image = 
np.array(Image.open(mask_image_path))[:, :, 0] > 0 # Assuming mask is stored in the red channel - - # Dilate mask - dilated_mask = binary_dilation(mask_image, circle_image) - - # # Project 3D points onto the mask - # points = mesh.vertices - # projected_points = trimesh.transformations.transform_points(points, worldMats[i]) - # projected_points[:, :2] /= projected_points[:, 2, np.newaxis] # Normalize projected points - # projected_points = projected_points[:, :2] - - vertices = transform_cg_cv(data_mesh.vertices) - projected_points, _ = camera_projection(vertices, extrinsics_all_cams[i], - intrinsics_all_cams[i], pixel_sizes_all_cams[i]) - #FIXME: projection issues? - # print(data_mesh.vertices) - # print(projected_points) - - # Find points inside the image bounds - image_height, image_width = dilated_mask.shape - valid_points = ( - (projected_points[:, 0] >= 0) & - (projected_points[:, 0] < image_width) & - (projected_points[:, 1] >= 0) & - (projected_points[:, 1] < image_height) - ) - print("%d valid points"%projected_points[valid_points].shape[0]) - img=np.array(Image.open(sfm_data["views"][i]["path"])) - for p in projected_points[valid_points]: - img[p[1],p[0],:]=[255,0,0] - Image.fromarray(img).save(f'{args.eval_dir}/%d.png'%i) - - # Find points inside the mask - points_inside_mask = dilated_mask[ - np.floor(projected_points[valid_points, 1]).astype(int), - np.floor(projected_points[valid_points, 0]).astype(int) - ] - # Ensure both arrays have the same size - valid_points_inside_mask = np.ones_like(valid_points, dtype=bool) - valid_points_inside_mask[valid_points] = points_inside_mask - # Remove points and corresponding faces outside the mask - valid_faces = np.any(valid_points_inside_mask[data_mesh.faces], axis=1) - data_mesh = data_mesh.submesh([valid_faces])[0] - - # Ensure only one component of the mesh is kept - pbar.update(1) - - if args.not_main_component == "False": - pbar.set_description('keep only the largest component mesh') - mesh_components = data_mesh.split(only_watertight=False) - largest_component = max(mesh_components, key=lambda comp: len(comp.vertices)) - print("Nb vertices before %d and after %d largest component "%(data_mesh.vertices.shape[0],largest_component.vertices.shape[0] )) - data_mesh = largest_component - # Save cleaned mesh - pbar.update(1) - pbar.set_description('export cleaned mesh') - - data_mesh.export(f'{args.eval_dir}/cleaned_mesh.obj') - - pbar.update(1) - pbar.set_description('done') - pbar.close() - - mp.freeze_support() - - vertices = np.asarray(data_mesh.vertices) - triangles = np.asarray(data_mesh.faces).astype(np.int32) - tri_vert = vertices[triangles] - - pbar.update(1) - pbar.set_description('sample pcd from mesh') - data_pcd = sample_pcd(tri_vert) - - elif args.mode == 'pcd': - print("Mode point cloud") - pbar = tqdm(total=9) - pbar.set_description('read data pcd') - # data_pcd_o3d = o3d.io.read_point_cloud(args.data) - # data_pcd = np.asarray(data_pcd_o3d.points) - mesh=trimesh.load(args.data) - data_pcd=mesh.vertices - - print("\nBenching") - data_pcd = data_pcd[~np.isnan(data_pcd).any(axis=1),:] - pbar.update(1) - pbar.set_description('random shuffle pcd index') - shuffle_rng = np.random.default_rng() - shuffle_rng.shuffle(data_pcd, axis=0) - - pbar.update(1) - nn_engine = skln.NearestNeighbors(n_neighbors=1, radius=thresh, algorithm='kd_tree', n_jobs=-1) - nn_engine.fit(data_pcd) - - #rnn_idxs = nn_engine.radius_neighbors(data_pcd[0:100], radius=thresh, return_distance=False) - - if 
os.path.exists(f'{args.eval_dir}/data_down.ply'):#load the sampling if already computed - print("loading tmp file from "+'{args.eval_dir}/data_down.ply') - data_down = trimesh.load_mesh(f'{args.eval_dir}/data_down.ply').vertices - print("%d points loaded"%data_down.shape[0]) - else:#compute it onterhwise - pbar.set_description('Computing neighbors for %d points'%(data_pcd.shape[0])) - mask = np.ones(data_pcd.shape[0], dtype=np.bool_) - #for curr, idxs in enumerate(rnn_idxs): - CHUNKS = 100 - chunk_size= int(data_pcd.shape[0]/CHUNKS)+1 - pbar.set_description('Computing neighbors for %d points (chunksize %d)'%(data_pcd.shape[0], chunk_size)) - for chunk in range(CHUNKS): - print("%d/%d"%(chunk, CHUNKS)) - chunk_indices=range(chunk*chunk_size, min((chunk+1)*chunk_size,data_pcd.shape[0]) ) - #select poin that have at least N neigbosr nearby? discard the neigs - #note this returns indices in the original data, so it is safely chunkable - rnn_idxs = nn_engine.radius_neighbors(data_pcd[chunk_indices], radius=thresh, return_distance=False) - for curr, idxs in zip(chunk_indices, rnn_idxs):#FIXME: parralelise that - if mask[curr]: - mask[idxs] = 0 - mask[curr] = 1 - - data_down = data_pcd[mask] - print("saving tmp file in "+'{args.eval_dir}/data_down.ply') - #trimesh.PointCloud(data_down).export(f'{args.eval_dir}/data_down.ply', "ply") - trimesh.PointCloud(data_down).export(f'{args.eval_dir}/data_down.obj', "obj") - - #Masking using obesrvation mask - data_in_obs = data_down - data_in = data_down - # inbound = np.one(data_down.shape[0]) - # grid_inbound = - # in_obs = - if args.obs_mask != '': - pbar.update(1) - pbar.set_description('masking data pcd') - obs_mask_file = loadmat(args.obs_mask) #loadmat(f'{args.dataset_dir}/ObsMask/ObsMask{args.scan}_10.mat') - ObsMask, BB, Res = [obs_mask_file[attr] for attr in ['ObsMask', 'BB', 'Res']] - BB = BB.astype(np.float32) - - patch = args.patch_size - inbound = ((data_down >= BB[:1]-patch) & (data_down < BB[1:]+patch*2)).sum(axis=-1) ==3 - data_in = data_down[inbound] - - data_grid = np.around((data_in - BB[:1]) / Res).astype(np.int32) - grid_inbound = ((data_grid >= 0) & (data_grid < np.expand_dims(ObsMask.shape, 0))).sum(axis=-1) ==3 - data_grid_in = data_grid[grid_inbound] - in_obs = ObsMask[data_grid_in[:,0], data_grid_in[:,1], data_grid_in[:,2]].astype(np.bool_) - data_in_obs = data_in[grid_inbound][in_obs] - - # added was added by Baptiste - # ground_plane = loadmat(f'{args.dataset_dir}/ObsMask/Plane{args.scan}.mat')['P'] - # data_hom = np.concatenate([data_in_obs, np.ones_like(data_in_obs[:,:1])], -1) - # data_above_bol = (ground_plane.reshape((1,4)) * data_hom).sum(-1) > 0 - # data_in_obs_above = data_in_obs[data_above_bol] - - pbar.update(1) - pbar.set_description('read STL pcd') - # stl_pcd = o3d.io.read_point_cloud(f'{args.dataset_dir}/Points/stl/stl{args.scan:03}_total.ply') - # stl = np.asarray(stl_pcd.points) - print(args.gt_mesh) - sample_pcd(tri_vert) - gt_mesh = trimesh.load(args.gt_mesh) - if len(gt_mesh.faces) == 0: - print("GT from point cloud") - stl = gt_mesh.vertices - else:#if mesh gt, then sample - GT_vertices = np.asarray(gt_mesh.vertices) - GT_triangles = np.asarray(gt_mesh.faces).astype(np.int32) - gt_tri_vert = vertices[triangles] - stl = sample_pcd(gt_tri_vert) - - #added by bapiste - # stl_hom = np.concatenate([stl, np.ones_like(stl[:,:1])], -1) - # stl_above_bol = (ground_plane.reshape((1,4)) * stl_hom).sum(-1) > 0 - # stl_above = stl[stl_above_bol] - - # stl_above_max = np.max(stl_above,axis=0) - # stl_above_min = 
np.min(stl_above,axis=0) - # stl_above_scale = np.sqrt(np.sum((stl_above_max - stl_above_min)**2,axis=0)) - - pbar.update(1) - pbar.set_description('compute data2stl') - #modif by paptiste - # nn_engine.fit(stl_above) - # dist_d2s, idx_d2s = nn_engine.kneighbors(data_in_obs_above, n_neighbors=1, return_distance=True) - nn_engine.fit(stl) - dist_d2s, idx_d2s = nn_engine.kneighbors(data_in_obs, n_neighbors=1, return_distance=True) - - max_dist = args.max_dist - mean_d2s = dist_d2s[dist_d2s < max_dist].mean() - - pbar.update(1) - pbar.set_description('compute stl2data') - - ##FILTER ground plane - stl_above=stl - if args.ground_plane != '': - #removed by baptiste - ground_plane = loadmat(args.ground_plane)#loadmat(f'{args.dataset_dir}/ObsMask/Plane{args.scan}.mat')['P'] - stl_hom = np.concatenate([stl, np.ones_like(stl[:,:1])], -1) - above = (ground_plane.reshape((1,4)) * stl_hom).sum(-1) > 0 - stl_above = stl[above] - - nn_engine.fit(data_in) - dist_s2d, idx_s2d = nn_engine.kneighbors(stl_above, n_neighbors=1, return_distance=True) - mean_s2d = dist_s2d[dist_s2d < max_dist].mean() - - pbar.update(1) - pbar.set_description('visualize error') - vis_dist = args.visualize_threshold - R = np.array([[1,0,0]], dtype=np.float64) - G = np.array([[0,1,0]], dtype=np.float64) - B = np.array([[0,0,1]], dtype=np.float64) - W = np.array([[1,1,1]], dtype=np.float64) - data_color = np.tile(B, (data_down.shape[0], 1)) - data_alpha = dist_d2s.clip(max=vis_dist) / vis_dist - #modifs by baptist - # data_color[ np.where(inbound)[0][grid_inbound][in_obs][data_above_bol] ] = R * data_alpha + W * (1-data_alpha) - # data_color[ np.where(inbound)[0][grid_inbound][in_obs][data_above_bol][dist_d2s[:,0] >= max_dist] ] = G - # write_vis_pcd(f'{args.eval_dir}/vis_{args.scan:03}_d2s{args.suffix}.ply', data_down, data_color) - if args.obs_mask != '': - data_color[ np.where(inbound)[0][grid_inbound][in_obs] ] = R * data_alpha + W * (1-data_alpha) - data_color[ np.where(inbound)[0][grid_inbound][in_obs][dist_d2s[:,0] >= max_dist] ] = G - else: - data_color = R * data_alpha + W * (1-data_alpha) - data_color[dist_d2s[:,0] >= max_dist] = G - write_vis_pcd(f'{args.eval_dir}/vis_d2s.ply', data_down, data_color) - - - stl_color = np.tile(B, (stl.shape[0], 1)) - stl_alpha = dist_s2d.clip(max=vis_dist) / vis_dist - #modifs by baptise - # stl_color[ np.where(stl_above_bol)[0] ] = R * stl_alpha + W * (1-stl_alpha) - # stl_color[ np.where(stl_above_bol)[0][dist_s2d[:,0] >= max_dist] ] = G - # write_vis_pcd(f'{args.eval_dir}/vis_{args.scan:03}_s2d{args.suffix}.ply', stl, stl_color) - if args.ground_plane != '': - stl_color[ np.where(above)[0] ] = R * stl_alpha + W * (1-stl_alpha) - stl_color[ np.where(above)[0][dist_s2d[:,0] >= max_dist] ] = G - else: - stl_color= R * stl_alpha + W * (1-stl_alpha) - stl_color[dist_s2d[:,0] >= max_dist] = G - - write_vis_pcd(f'{args.eval_dir}/vis_s2d.ply', stl, stl_color) - - #added by baptiste - pbar.update(1) - pbar.set_description('compute scores') - over_all = (mean_d2s + mean_s2d) / 2 - # print(mean_d2s, mean_s2d, over_all) - - # filtering outliers - dist_d2s_filt = dist_d2s[dist_d2s < max_dist] - dist_s2d_filt = dist_s2d[dist_s2d < max_dist] - - # F-score - dist_thr_mm_list = np.arange(1,11,1) - precisions = [] - recalls = [] - fscores = [] - for ii in range(len(dist_thr_mm_list)): - dist_thr_mm = dist_thr_mm_list[ii] - - precision = np.count_nonzero(dist_d2s_filt < dist_thr_mm) / dist_d2s_filt.size - recall = np.count_nonzero(dist_s2d_filt < dist_thr_mm) / dist_s2d_filt.size - fscore = 
2*precision*recall/(precision+recall) - - precisions.append(precision) - recalls.append(recall) - fscores.append(fscore) - - scores_tab = np.zeros((len(dist_thr_mm_list),4)) - scores_tab[:,0] = dist_thr_mm_list - scores_tab[:,1] = precisions - scores_tab[:,2] = recalls - scores_tab[:,3] = fscores - - if not os.path.exists(args.eval_dir): - os.makedirs(args.eval_dir) - - # Write the data to a CSV file - with open(f'{args.eval_dir}/result_{args.suffix}.csv', 'w', newline='') as file: - writer = csv.writer(file) - writer.writerow(['data2stl','stl2data']) - writer.writerow([mean_d2s, mean_s2d]) - writer.writerow(['']) - writer.writerow(['distance thr','precision','recall','f-score']) - for ii in range(len(scores_tab)): - writer.writerow(scores_tab[ii,:]) - - pbar.update(1) - pbar.set_description('done') - pbar.close() - - over_all = (mean_d2s + mean_s2d) / 2 - print(mean_d2s, mean_s2d, over_all) \ No newline at end of file diff --git a/mrrs/3DR_benchmark/metrics/dtu/environnement.yaml b/mrrs/3DR_benchmark/metrics/dtu/environnement.yaml deleted file mode 100644 index 5363a6e..0000000 --- a/mrrs/3DR_benchmark/metrics/dtu/environnement.yaml +++ /dev/null @@ -1,21 +0,0 @@ -name: dtubench -channels: - - conda-forge - - defaults -dependencies: -- python=3.9 -- numpy -- scikit-learn -- scikit-image -- tqdm -- scipy -- trimesh -- pillow -- opencv -- pip -# #FIXME: install mrrs not working? -# - pip: -# - "../../../." --install-option="-e -v" - - - diff --git a/mrrs/nerf/nerfstudio.py b/mrrs/nerf/nerfstudio.py index 6896cfa..2277ff6 100644 --- a/mrrs/nerf/nerfstudio.py +++ b/mrrs/nerf/nerfstudio.py @@ -5,7 +5,7 @@ import shutil from meshroom.core import desc -from mrrs.core.CondaNode import CondaNode +from meshroom.core.plugin import CondaNode from mrrs.nerf import ENV_FILE def convert_sfmdata_to_nerf(sfm_data, actual_path): diff --git a/mrrs/nerf/nerfstudio_export.py b/mrrs/nerf/nerfstudio_export.py index 601a405..ab06d62 100644 --- a/mrrs/nerf/nerfstudio_export.py +++ b/mrrs/nerf/nerfstudio_export.py @@ -2,7 +2,7 @@ import os from meshroom.core import desc -from mrrs.core.CondaNode import CondaNode +from meshroom.core.plugin import CondaNode from mrrs.nerf import ENV_FILE class NeRFStudioExport(CondaNode): diff --git a/mrrs/utils/ExecuteCmdConda.py b/mrrs/utils/ExecuteCmdConda.py index 6048618..129b33e 100644 --- a/mrrs/utils/ExecuteCmdConda.py +++ b/mrrs/utils/ExecuteCmdConda.py @@ -1,7 +1,7 @@ __version__ = "1.0" from meshroom.core import desc -from mrrs.core.CondaNode import CondaNode +from meshroom.core.plugin import CondaNode class ExecuteCmdConda(CondaNode): commandLine = '{commandLineValue}' diff --git a/setup.py b/setup.py index f07e723..f1afe86 100644 --- a/setup.py +++ b/setup.py @@ -1,36 +1,21 @@ """ -Installer for MRRS. Will handle dependencies and will add modules to meshroom. +Installer for the core module of mrrs. so it can be used in other plugins. 
""" -import os -import pathlib from setuptools import setup -#Setting env variable -#FIXME: this is not working, it only sets for the current env -MLEM_path = os.path.join(str(pathlib.Path(__file__).parent.resolve()), "mrrs", "nodes") -print("MRRS path "+MLEM_path) -os.environ['MESHROOM_NODES_PATH'] = MLEM_path -#move to a conda recipe -# https://docs.conda.io/projects/conda-build/en/latest/resources/build-scripts.html - setup( - name='MRRS', + name='MRRSCore', version='0.0.0', author='Matthieu Hog', author_email='matthieu.hog@technicolor.com', - packages=['mrrs'],# + packages=['mrrs.core'], #scripts=['bin/script1','bin/script2'], - #url='http://pypi.python.org/pypi/PackageName/', - #license='LICENSE.txt', - description='Meshroom Research plugin and library.', + url='https://github.com/alicevision/MeshroomResearch/', + license='LICENSE-MPL2.md', + description='Meshroom Research Core functions', long_description=open('README.md').read(), install_requires=["numpy", "pillow", "opencv-python-headless", "trimesh", "click"], - # "openexr-python"],#note add mode meshroom? with oiio - scripts=['clis/benchmark.py'], - extras_require= { - "onnx": ["onnxruntime"], - # "training": "tensorflow==2.4"#need conda - }, + # "oiio-python"], #FIXME: no pypy pacjages! need conda env or external install ) \ No newline at end of file From 631c50ea7a8e81059066f83d264ed40c32524de9 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 3 Jun 2024 14:46:01 +0200 Subject: [PATCH 08/32] moved pipelines --- README.md | 12 ++++++------ meshroomPlugin.json | 11 +++++++---- mrrs/3DR_benchmark/DepthMapComparison.py | 2 +- mrrs/3DR_benchmark/LoadDataset.py | 3 ++- .../3DR_benchmark/pipelines}/blended.mg | 0 .../3DR_benchmark/pipelines}/dtu.mg | 0 .../3DR_benchmark/pipelines}/eth3d.mg | 0 .../3DR_benchmark/pipelines}/nerf.mg | 0 .../3DR_benchmark/pipelines}/tanks_and_temples.mg | 0 .../colmap/pipelines}/colmap_project.mg | 0 .../colmap/pipelines}/colmap_template_light.mg | 0 .../reality_capture/pipelines}/tank_and_temples_1.mg | 0 .../reality_capture/pipelines}/test_import_export.mg | 0 13 files changed, 16 insertions(+), 12 deletions(-) rename {pipelines/benchmark => mrrs/3DR_benchmark/pipelines}/blended.mg (100%) rename {pipelines/benchmark => mrrs/3DR_benchmark/pipelines}/dtu.mg (100%) rename {pipelines/benchmark => mrrs/3DR_benchmark/pipelines}/eth3d.mg (100%) rename {pipelines/benchmark => mrrs/3DR_benchmark/pipelines}/nerf.mg (100%) rename {pipelines/benchmark => mrrs/3DR_benchmark/pipelines}/tanks_and_temples.mg (100%) rename {pipelines/colmap => mrrs/colmap/pipelines}/colmap_project.mg (100%) rename {pipelines/colmap => mrrs/colmap/pipelines}/colmap_template_light.mg (100%) rename {pipelines/reality_capture => mrrs/reality_capture/pipelines}/tank_and_temples_1.mg (100%) rename {pipelines/reality_capture => mrrs/reality_capture/pipelines}/test_import_export.mg (100%) diff --git a/README.md b/README.md index 0e77fe2..5a3d783 100644 --- a/README.md +++ b/README.md @@ -91,10 +91,10 @@ pip install -e ./MeshroomResearch Contributions to Meshroom-Research are welcomed! Here's a quick overview of the project structure: -- `mrrs/core`: Basic IOs, utilities, and common geometrical functions. -- `mrrs/pipeline`: Meshroom pipeline files. -- `mrrs/scripts`: Scripts, including benchmarking tools. -- `mrrs/nodes`: Interface nodes for integration into Meshroom. -- `mrrs/`: Code related to specific features. 
+- `mrrs/core`: The library side of MRRS, it contains basic IOs, utilities, and common geometrical functions to be used in other plugins. +- `mrrs/`: Contains the code and the nodes related to a plugin feature. +- `mrrs/meshrooPlugin.json`: Contains the list of plugins in this collection. -Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom's repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced a new type of node, CondaNode, which automates Conda environment management for your convenience. +See meshroom's [plugin documentation](https://github.com/alicevision/Meshroom/tree/dev/plugin_system/meshroom/core) to leanrn how to make your own plugins. + +Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom's repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced new types of node (eg. CondaNode and DockerNode), which automates environment management for your convenience. diff --git a/meshroomPlugin.json b/meshroomPlugin.json index 29d6f58..ace80a7 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -5,7 +5,8 @@ }, { "pluginName":"3DR Benchmark", - "nodesFolder":"mrrs/3DR_benchmark" + "nodesFolder":"mrrs/3DR_benchmark", + "pipelineFolder":"mrrs/3DR_benchmark/pipelines" }, { "pluginName":"Gaussian Splatting", @@ -13,7 +14,8 @@ }, { "pluginName":"Colmap", - "nodesFolder":"mrrs/colmap" + "nodesFolder":"mrrs/colmap", + "pipelineFolder":"mrrs/colmap/pipelines" }, { "pluginName":"Deep Depth Maps", @@ -25,7 +27,8 @@ }, { "pluginName":"Reality Capture", - "nodesFolder":"mrrs/reality_capture" + "nodesFolder":"mrrs/reality_capture", + "pipelineFolder":"mrrs/reality_capture/pipelines" }, { "pluginName":"Stereo Photometry", @@ -34,5 +37,5 @@ { "pluginName":"Utils", "nodesFolder":"mrrs/utils" - } + } ] \ No newline at end of file diff --git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index b1616ee..e9fee68 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -8,7 +8,7 @@ from meshroom.core import desc from mrrs.core.ios import open_depth_map, save_exr -from mrrs.metrics.metrics import compute_depth_metric +from .metrics.metrics import compute_depth_metric class DepthMapComparison(desc.Node): diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index a58c164..5af929b 100644 --- a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -9,7 +9,8 @@ from mrrs.core.geometry import * from mrrs.core.ios import * -from mrrs.datasets import load_dataset + +from .datasets import load_dataset #FIXME:move this into a command line node? 
class LoadDataset(desc.Node): diff --git a/pipelines/benchmark/blended.mg b/mrrs/3DR_benchmark/pipelines/blended.mg similarity index 100% rename from pipelines/benchmark/blended.mg rename to mrrs/3DR_benchmark/pipelines/blended.mg diff --git a/pipelines/benchmark/dtu.mg b/mrrs/3DR_benchmark/pipelines/dtu.mg similarity index 100% rename from pipelines/benchmark/dtu.mg rename to mrrs/3DR_benchmark/pipelines/dtu.mg diff --git a/pipelines/benchmark/eth3d.mg b/mrrs/3DR_benchmark/pipelines/eth3d.mg similarity index 100% rename from pipelines/benchmark/eth3d.mg rename to mrrs/3DR_benchmark/pipelines/eth3d.mg diff --git a/pipelines/benchmark/nerf.mg b/mrrs/3DR_benchmark/pipelines/nerf.mg similarity index 100% rename from pipelines/benchmark/nerf.mg rename to mrrs/3DR_benchmark/pipelines/nerf.mg diff --git a/pipelines/benchmark/tanks_and_temples.mg b/mrrs/3DR_benchmark/pipelines/tanks_and_temples.mg similarity index 100% rename from pipelines/benchmark/tanks_and_temples.mg rename to mrrs/3DR_benchmark/pipelines/tanks_and_temples.mg diff --git a/pipelines/colmap/colmap_project.mg b/mrrs/colmap/pipelines/colmap_project.mg similarity index 100% rename from pipelines/colmap/colmap_project.mg rename to mrrs/colmap/pipelines/colmap_project.mg diff --git a/pipelines/colmap/colmap_template_light.mg b/mrrs/colmap/pipelines/colmap_template_light.mg similarity index 100% rename from pipelines/colmap/colmap_template_light.mg rename to mrrs/colmap/pipelines/colmap_template_light.mg diff --git a/pipelines/reality_capture/tank_and_temples_1.mg b/mrrs/reality_capture/pipelines/tank_and_temples_1.mg similarity index 100% rename from pipelines/reality_capture/tank_and_temples_1.mg rename to mrrs/reality_capture/pipelines/tank_and_temples_1.mg diff --git a/pipelines/reality_capture/test_import_export.mg b/mrrs/reality_capture/pipelines/test_import_export.mg similarity index 100% rename from pipelines/reality_capture/test_import_export.mg rename to mrrs/reality_capture/pipelines/test_import_export.mg From e08b839c2c929e1ca2cef4c06bc1dfa09a61c0cd Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 3 Jun 2024 16:19:15 +0200 Subject: [PATCH 09/32] modified relative import in benchmark --- mrrs/3DR_benchmark/CalibrationComparison.py | 2 +- mrrs/3DR_benchmark/CleanMesh.py | 2 +- mrrs/3DR_benchmark/DepthMapComparison.py | 2 +- mrrs/3DR_benchmark/LoadDataset.py | 2 +- mrrs/3DR_benchmark/MeshComparison.py | 2 +- mrrs/3DR_benchmark/datasets/__init__.py | 13 +++++++------ 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index 121dd78..e412cbf 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -12,7 +12,7 @@ from .metrics.metrics import * class CalibrationComparison(desc.Node): - category = 'Meshroom Research' + category = 'MRRS - Benchmark' documentation = '''For each camera, compare its estimated parameters with a given groud truth.''' diff --git a/mrrs/3DR_benchmark/CleanMesh.py b/mrrs/3DR_benchmark/CleanMesh.py index 33df74a..1a70f61 100644 --- a/mrrs/3DR_benchmark/CleanMesh.py +++ b/mrrs/3DR_benchmark/CleanMesh.py @@ -10,7 +10,7 @@ class CleanMesh(CondaNode): #overides the env path env_file = ENV_FILE - category = 'Meshroom Research' + category = 'MRRS - Benchmark' commandLine = 'python "'+os.path.join(os.path.dirname(__file__),"..", "..", "metrics", "baptiste", "remove_invisible_faces.py")+'" {allParams}' gpu = desc.Level.NONE documentation = ''' ''' diff 
--git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index e9fee68..23b60db 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -13,7 +13,7 @@ class DepthMapComparison(desc.Node): # size = desc.DynamicNodeSize('inputSfM') - category = 'Meshroom Research' + category = 'MRRS - Benchmark' documentation = '''For each camera, compare its depth maps to a given ground truth. The names of the original inputSfM file is used to retrieve the GT file, therefore must match. diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index 5af929b..989aeee 100644 --- a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -14,7 +14,7 @@ #FIXME:move this into a command line node? class LoadDataset(desc.Node): - category = 'Meshroom Research' + category = 'MRRS - Benchmark' documentation = '''Util node to open datasets with different data from the images in the .sfm''' diff --git a/mrrs/3DR_benchmark/MeshComparison.py b/mrrs/3DR_benchmark/MeshComparison.py index 3178f38..1b71cf7 100644 --- a/mrrs/3DR_benchmark/MeshComparison.py +++ b/mrrs/3DR_benchmark/MeshComparison.py @@ -15,7 +15,7 @@ def env_file(self): commandLine = 'python "'+os.path.join(os.path.dirname(__file__),"..", "..", "metrics", "chamfer_distance", "eval_pcd.py")+'" {allParams}' gpu = desc.Level.NONE - category = 'Meshroom Research' + category = 'MRRS - Benchmark' documentation = '''Calls the dtu benchmark metrics between two meshes''' inputs = [ diff --git a/mrrs/3DR_benchmark/datasets/__init__.py b/mrrs/3DR_benchmark/datasets/__init__.py index ae8a28d..6ee71d1 100644 --- a/mrrs/3DR_benchmark/datasets/__init__.py +++ b/mrrs/3DR_benchmark/datasets/__init__.py @@ -1,11 +1,12 @@ from mrrs.core.geometry import is_rotation_mat -from mrrs.datasets.eth3d import open_dataset as open_dataset_eth3d -from mrrs.datasets.baptiste import open_dataset as open_dataset_baptiste -from mrrs.datasets.blendedMVG import open_dataset as open_dataset_blended -from mrrs.datasets.dtu import open_dataset as open_dataset_dtu -from mrrs.datasets.alab import open_dataset as open_dataset_alab -from mrrs.datasets.nerf import open_dataset as open_dataset_nerf + +from .eth3d import open_dataset as open_dataset_eth3d +from .baptiste import open_dataset as open_dataset_baptiste +from .blendedMVG import open_dataset as open_dataset_blended +from .dtu import open_dataset as open_dataset_dtu +from .alab import open_dataset as open_dataset_alab +from .nerf import open_dataset as open_dataset_nerf def load_dataset(sfm_data, dataset_type): """ From e42b3aedf83c7a60bb746d26c603cee8c2df6f07 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Thu, 6 Jun 2024 15:43:58 +0200 Subject: [PATCH 10/32] moved back and started cleanup in deep feature matching --- .../DeepFeatureExtraction.py | 71 +++++ .../deep_feature_matching/LightGlueMatcher.py | 99 +++++++ mrrs/deep_feature_matching/LoftrMatcher.py | 115 ++++++++ mrrs/deep_feature_matching/MaskFeatures.py | 102 +++++++ mrrs/deep_feature_matching/README.md | 25 ++ mrrs/deep_feature_matching/VizFeatures.py | 196 +++++++++++++ mrrs/deep_feature_matching/VizTracks.py | 133 +++++++++ mrrs/deep_feature_matching/env.yaml | 17 ++ .../deep_feature_extraction.py | 96 +++++++ .../kornia_wrappers/light_glue_matcher.py | 161 +++++++++++ .../kornia_wrappers/loftr_matcher.py | 260 ++++++++++++++++++ .../kornia_wrappers/utils.py | 112 ++++++++ 12 files changed, 1387 insertions(+) create mode 100644 
mrrs/deep_feature_matching/DeepFeatureExtraction.py create mode 100644 mrrs/deep_feature_matching/LightGlueMatcher.py create mode 100644 mrrs/deep_feature_matching/LoftrMatcher.py create mode 100644 mrrs/deep_feature_matching/MaskFeatures.py create mode 100644 mrrs/deep_feature_matching/README.md create mode 100644 mrrs/deep_feature_matching/VizFeatures.py create mode 100644 mrrs/deep_feature_matching/VizTracks.py create mode 100644 mrrs/deep_feature_matching/env.yaml create mode 100644 mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py create mode 100644 mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py create mode 100644 mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py create mode 100644 mrrs/deep_feature_matching/kornia_wrappers/utils.py diff --git a/mrrs/deep_feature_matching/DeepFeatureExtraction.py b/mrrs/deep_feature_matching/DeepFeatureExtraction.py new file mode 100644 index 0000000..2466279 --- /dev/null +++ b/mrrs/deep_feature_matching/DeepFeatureExtraction.py @@ -0,0 +1,71 @@ +__version__ = "2.0" +import os + +from meshroom.core import desc + +from meshroom.core.plugin import CondaNode + + +EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/deep_feature_extraction.py") + +class DeepFeatureExtraction(CondaNode): + + category = 'Sparse Reconstruction' + documentation = ''' ''' + gpu = desc.Level.INTENSIVE + + commandLine = EXEC+" {allParams}" + + #overides the env path + envFile=os.path.dirname(__file__), 'env.yaml' + + inputs = [ + desc.File( + name="inputSfMData", + label="SfMData", + description="Input SfMData file.", + value="", + uid=[0], + ), + + desc.ChoiceParam( + name="method", + label="method", + description="method", + value="DISK", + values=["DISK", "SIFT"], + exclusive=True, + uid=[], + ), + + desc.IntParam( + name="maxKeypoints", + label="maxKeypoints", + description="Only keep maxKeypoints features.", + range=(0,100000000,1), + value=3000, + uid=[0], + ), + + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + value="info", + values=["fatal", "error", "warning", "info", "debug", "trace"], + exclusive=True, + uid=[], + ) + ] + + outputs = [ + desc.File( + name="outputFolder", + label="Output Folder", + description="Path to a folder in which the computed results are stored.", + value=desc.Node.internalFolder, + uid=[], + ) + ] + + diff --git a/mrrs/deep_feature_matching/LightGlueMatcher.py b/mrrs/deep_feature_matching/LightGlueMatcher.py new file mode 100644 index 0000000..9066811 --- /dev/null +++ b/mrrs/deep_feature_matching/LightGlueMatcher.py @@ -0,0 +1,99 @@ +__version__ = "2.0" +import os + +from meshroom.core import desc + +from meshroom.core.plugin import CondaNode + +EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/light_glue_matcher.py") + +class LightGlueMatching(CondaNode): + + category = 'Sparse Reconstruction' + documentation = ''' ''' + gpu = desc.Level.INTENSIVE + + commandLine = EXEC+" {allParams}" + + envFile=os.path.dirname(__file__), 'env.yaml' + + inputs = [ + desc.File( + name="inputSfMData", + label="SfMData", + description="Input SfMData file.", + value="", + uid=[0], + ), + + desc.File( + name="inputFeatureFolder", + label="inputFeatureFolder", + description="inputFeatureFolder", + value="", + uid=[0], + ), + + desc.IntParam( + name="keepNmatches", + label="keepNmatches", + description="Only keep the n strongest matches per view. 
0 to disable", + range=(0,1000,1), + value=0, + uid=[0], + ), + + desc.FloatParam( + name="distanceThreshold", + label="distanceThreshold", + description="distanceThreshold", + range=(0.0,1.0,0.01), + value=0.0, + uid=[0], + ), + + desc.StringParam( + name='imageMaching', + label='imageMatching', + description='Method for image matching. Can be "all", "file" to use the file in imagePairs, or an integer defining a window around the framesId', + value="all", + uid=[0], + ), + + desc.File( + name='imagePairs', + label='imagePairs', + description='Optional file defining the images pairs to be matched', + value="", + uid=[0], + ), + + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + value="info", + values=["fatal", "error", "warning", "info", "debug", "trace"], + exclusive=True, + uid=[], + ) + ] + outputs = [ + desc.File( + name="outputFolder", + label="Output Folder", + description="Path to a folder in which the computed results are stored.", + value=desc.Node.internalFolder, + uid=[], + ), + desc.File( + name="matchesFolders", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches"), + uid=[], + group="" + ) + ] + + diff --git a/mrrs/deep_feature_matching/LoftrMatcher.py b/mrrs/deep_feature_matching/LoftrMatcher.py new file mode 100644 index 0000000..1969ab9 --- /dev/null +++ b/mrrs/deep_feature_matching/LoftrMatcher.py @@ -0,0 +1,115 @@ +__version__ = "2.0" +import os + +from meshroom.core import desc + +from meshroom.core.plugin import CondaNode + +LOFTR_EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/loftr_matcher.py") + +class LoftrMatcher(CondaNode): + + category = 'Sparse Reconstruction' + documentation = ''' ''' + gpu = desc.Level.INTENSIVE + + commandLine = LOFTR_EXEC+" {allParams}" + + envFile=os.path.dirname(__file__), 'env.yaml' + + inputs = [ + desc.File( + name="inputSfMData", + label="SfMData", + description="Input SfMData file.", + value="", + uid=[0], + ), + + desc.IntParam( + name="keepNmatches", + label="keepNmatches", + description="Only keep the n strongest matches per view. 0 to disable", + range=(20000,100000,1), + value=0, + uid=[0], + ), + + desc.FloatParam( + name="confidenceThreshold", + label="confidenceThreshold", + description="Only keep the matches if their confidence hits this threshold.", + range=(0.0,1.0,0.01), + value=0.0, + uid=[0], + ), + + desc.StringParam( + name='imageMaching', + label='imageMatching', + description='Method for image matching. 
Can be "all", "file" to use the file in imagePairs, or an integer defining a window around the framesId', + value="all", + uid=[0], + ), + + desc.File( + name='imagePairs', + label='imagePairs', + description='Optional file defining the images pairs to be matched', + value="", + uid=[0], + ), + + desc.File( + name='maskFolder', + label='maskFolder', + description='Optional mask folder to remove matches from these zones', + value="", + uid=[0], + ), + + # desc.BoolParam( + # name="debugImages", + # label="debugImages", + # description="Will write image matches", + # value=False, + # uid=[0], + # ), + + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + value="info", + values=["fatal", "error", "warning", "info", "debug", "trace"], + exclusive=True, + uid=[], + ) + ] + outputs = [ + desc.File( + name="outputFolder", + label="Output Folder", + description="Path to a folder in which the computed results are stored.", + value=desc.Node.internalFolder, + uid=[], + ), + desc.File( + name="featuresFolders", + label="Features Folder", + description="Path to a folder in which the features matches are stored.", + value=os.path.join(desc.Node.internalFolder, "features"), + uid=[], + group="" + ), + desc.File( + name="matchesFolders", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches"), + uid=[], + group="" + ) + ] + + diff --git a/mrrs/deep_feature_matching/MaskFeatures.py b/mrrs/deep_feature_matching/MaskFeatures.py new file mode 100644 index 0000000..3b43ef2 --- /dev/null +++ b/mrrs/deep_feature_matching/MaskFeatures.py @@ -0,0 +1,102 @@ +__version__ = "3.0" + +import os +import json + +import numpy as np +from meshroom.core import desc + +from mrrs.core.ios import * +from .utils import open_descriptor_file, write_descriptor_file + +class MaskFeatures(desc.Node): + + category = 'Meshroom Research' + documentation = '''''' + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', + value='', + uid=[0], + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="", + uid=[0], + ), + + desc.File( + name="maskFolder", + label="Mask Folder", + description="maskFolder", + value="", + uid=[0], + ), + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[0], + ), + + ] + + outputs = [ + desc.File( + name='outputFolder', + label='outputFolder', + description='outputFolder', + value=desc.Node.internalFolder, + uid=[], + group='', + ) + ] + + def processChunk(self, chunk): + """ + """ + chunk.logManager.start(chunk.node.verboseLevel.value) + if chunk.node.inputSfM.value == '': + raise RuntimeError("No inputSfM specified") + if chunk.node.maskFolder.value == '': + raise RuntimeError("No maskFolder specified") + + sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) + feature_files = os.listdir(chunk.node.featureFolder.value) + print("%d feature files detected"%len(feature_files)) + + print("Masking features") + for view in sfm_data["views"]: + image_uid = view["viewId"] + keypoint_file = [os.path.join(chunk.node.featureFolder.value, ff) for ff + in feature_files if ((image_uid in ff) and ff.endswith(".feat"))][0] + 
desc_file = [os.path.join(chunk.node.featureFolder.value, ff) for ff + in feature_files if ((image_uid in ff) and ff.endswith(".desc"))][0] + mask_file = os.path.join(chunk.node.maskFolder.value, image_uid+".exr") + + keypoints = np.loadtxt(keypoint_file) + mask = open_image(mask_file).astype(np.bool) + keypoints_nn = np.round(keypoints).astype(np.int32) + valid_mask=mask[keypoints_nn[:,1],keypoints_nn[:,0],0] + valid_keypoints = keypoints[valid_mask,:] + print("Saving %d keypoints"%valid_keypoints.shape[0]) + with open(os.path.join(chunk.node.outputFolder.value, os.path.basename(keypoint_file)), "w") as kpf: + for kp_x, kp_y in valid_keypoints[:,0:2]: + kpf.write("%f %f 0 0\n"%(kp_x, kp_y)) + #FIXME: need to remove coresp descriptor + descriptors=open_descriptor_file(desc_file) + valid_descriptors = descriptors[valid_mask,:] + write_descriptor_file(valid_descriptors, os.path.join(chunk.node.outputFolder.value, os.path.basename(desc_file))) + + chunk.logManager.end() + diff --git a/mrrs/deep_feature_matching/README.md b/mrrs/deep_feature_matching/README.md new file mode 100644 index 0000000..16c1e2b --- /dev/null +++ b/mrrs/deep_feature_matching/README.md @@ -0,0 +1,25 @@ +# Deep feature matching + +This module is dedicated to test deep feature matches in meshroom. + +It is using the [Kornia](https://github.com/kornia/kornia) library to wrap the descriptors and detectors. + +The env.yaml is used in meshroom to automatically build the environnements. + +Utils contains the IOs for the features/matches. + +# Deep feature extraction + +This node is used to isolates the feature extraction. + +! it relies on a meshroom PR that is not yet merge to have arbitrary desriptors. + +# LightGlue Matcher + +Uses the [Lightglue](https://github.com/cvg/LightGlue) matcher to matches the computed feature. + + + # Loftr Matcher + + Uses the image-to-image matcher of LOFTR, we use the feature coordinates to establish corespondances. 
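
For reference, here is a minimal, self-contained sketch (not part of this patch) of the Kornia LoFTR call that the `kornia_wrappers/loftr_matcher.py` script builds on: two views go in as grayscale tensors, and matched keypoint coordinates plus per-match confidences come out, which the node then sorts, thresholds and writes out as AliceVision features/matches. The image paths and the 0.5 confidence threshold below are placeholders, not values taken from the patch.

```python
# Illustrative sketch only: pairwise LoFTR matching with Kornia, mirroring what
# the loftr_matcher.py wrapper does for each image pair. Paths, the pretrained
# weights name ("outdoor") and the 0.5 threshold are assumptions for the example.
import torch
import kornia
from torchvision.io import read_image, ImageReadMode

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
loftr = kornia.feature.LoFTR(pretrained="outdoor").to(device).eval()

def load_gray(path):
    # Load an image as a 1x1xHxW float tensor in [0, 1], the layout LoFTR expects.
    # (LoFTR works on grayscale; image sides should be multiples of 8, resize/pad if needed.)
    img = read_image(path, ImageReadMode.GRAY).float() / 255.0  # 1xHxW
    return img[None].to(device)                                  # add batch dim -> 1x1xHxW

image0 = load_gray("view_0.png")  # placeholder paths
image1 = load_gray("view_1.png")

with torch.no_grad():
    out = loftr({"image0": image0, "image1": image1})

kpts0 = out["keypoints0"]   # (N, 2) pixel coordinates in image0
kpts1 = out["keypoints1"]   # (N, 2) corresponding coordinates in image1
conf = out["confidence"]    # (N,) per-match confidence

keep = conf > 0.5           # confidence filtering, analogous to the node's confidenceThreshold
print(f"{int(keep.sum())} matches kept out of {len(conf)}")
```

Because LoFTR matches image pairs directly instead of describing each image independently, the wrapper re-declares the matched coordinates of the second view as new features for that view, which is why the LoftrMatcher node exposes both a features folder and a matches folder as outputs.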
+ diff --git a/mrrs/deep_feature_matching/VizFeatures.py b/mrrs/deep_feature_matching/VizFeatures.py new file mode 100644 index 0000000..d1fef4a --- /dev/null +++ b/mrrs/deep_feature_matching/VizFeatures.py @@ -0,0 +1,196 @@ +__version__ = "3.0" + +import os +import json + +import cv2 +from mrrs.deep_feature_matching.utils import open_matches +import numpy as np + +from meshroom.core import desc + +from mrrs.core.ios import * +from mrrs.core.geometry import * + +def draw_keypoints(image, keypoints, downsample=1, p = 2, o = 0): + for kp in keypoints[::downsample]: + image[int(kp[1])-p:int(kp[1])+p, o+int(kp[0])-p:o+int(kp[0])+p, :]=[0,255,0] + return image + +def get_best_matching_view(view_matches): + values = list(view_matches.values()) + lengths = [v.shape[0] for v in values] + keys = list(view_matches.keys()) + index_max = np.argmax(lengths) + return keys[index_max], values[index_max] + +class VizFeatures(desc.Node): + + category = 'Meshroom Research' + documentation = '''''' + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', + value='', + uid=[0], + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="", + uid=[0], + ), + + desc.File( + name="matcheFolder", + label="Match Folder", + description="Featurefolder", + value="", + uid=[0], + ), + + desc.IntParam( + name="keepMatches", + label="keepMatches", + description="Only display first n matches", + range=(1,1000,1), + value=0, + uid=[0], + ), + + desc.BoolParam( + name="matchOnly", + label="matchOnly", + description="Only display the matches", + value=True, + uid=[0], + ), + + desc.IntParam( + name="markerSize", + label="markerSize", + description="marker wize /2", + range=(1,1000,1), + value=1, + uid=[0], + ), + + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[0], + ), + + ] + + outputs = [ + desc.File( + name='outputFolder', + label='outputFolder', + description='outputFolder', + value=desc.Node.internalFolder, + uid=[], + group='', + ), + desc.File( + name='featureViz', + label='featureViz', + description='featureViz', + semantic='image', + value=os.path.join(desc.Node.internalFolder, 'features_.png'), + uid=[], + group='', + ), + desc.File( + name='matchingViz', + label='matchingViz', + description='matchingViz', + semantic='image', + value=os.path.join(desc.Node.internalFolder, 'matches_.png'), + uid=[], + group='', + ), + ] + + def processChunk(self, chunk): + """ + """ + chunk.logManager.start(chunk.node.verboseLevel.value) + if chunk.node.inputSfM.value == '': + raise RuntimeError("No inputSfM specified") + + sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) + feature_files = os.listdir(chunk.node.featureFolder.value) + print("%d feature files detected"%len(feature_files)) + if not chunk.node.matchOnly.value: + print("Writting features") + for view in sfm_data["views"]: + image_path = view["path"] + image_uid = view["viewId"] + image = open_image(image_path) + keypoint_file = [os.path.join(chunk.node.featureFolder.value, ff) for ff in feature_files if ((image_uid in ff) and ff.endswith(".feat"))][0] + keypoints = np.loadtxt(keypoint_file) + image = draw_keypoints(image, keypoints, p=chunk.node.markerSize.value) + save_image(os.path.join(chunk.node.outputFolder.value, "features_"+image_uid+".png"),image) + + if 
chunk.node.matcheFolder.value != "": + print("Writting matches") + chunk.logger.info('Displaying Matching') + match_file = [os.path.join(chunk.node.matcheFolder.value, mf) for mf in os.listdir(chunk.node.matcheFolder.value) if mf.endswith(".txt")][0] + chunk.logger.info('Opening matches') + matches = open_matches(match_file) + chunk.logger.info('Done open') + for view_id_0 in matches.keys(): + chunk.logger.info('Matching for view '+view_id_0) + #for now, only select the best matched view (the one with most matches) + # view_id_1, matches_0_to_1=get_best_matching_view(matches[view_id_0]) + # chunk.logger.info('Best matcing for view '+view_id_0+" is "+view_id_1+ " (%d matches)"%len(matches_0_to_1)) + for view_id_1 in matches[view_id_0].keys(): + matches_0_to_1 = matches[view_id_0][view_id_1] + if (( matches_0_to_1[:,0].shape[0] != np.unique(matches_0_to_1[:,0]).shape[0] ) + or ( matches_0_to_1[:,1].shape[0] != np.unique(matches_0_to_1[:,1]).shape[0] ) ): + # raise RuntimeError("Found duplicated points fo images "+view_id_0+" "+view_id_1+"\n. ") + print("Found duplicated points fo images "+view_id_0+" "+view_id_1+"\n. ") + + if chunk.node.matchOnly.value: #if match only, will only display line + image_file_0 = [view["path"] for view in sfm_data["views"] if view["viewId"]==view_id_0][0] + image_file_1 = [view["path"] for view in sfm_data["views"] if view["viewId"]==view_id_1][0] + else: + image_file_0 = os.path.join(chunk.node.outputFolder.value, "features_"+view_id_0+".png") + image_file_1 = os.path.join(chunk.node.outputFolder.value, "features_"+view_id_1+".png") + + image_0 = open_image(image_file_0) + image_1 = open_image(image_file_1) + + match_image = np.concatenate([image_0, image_1], axis=1) + keypoint_file_0 = [os.path.join(chunk.node.featureFolder.value, ff) for ff in feature_files if ((view_id_0 in ff) and ff.endswith(".feat"))][0] + keypoint_file_1 = [os.path.join(chunk.node.featureFolder.value, ff) for ff in feature_files if ((view_id_1 in ff) and ff.endswith(".feat"))][0] + keypoints_0 = np.loadtxt(keypoint_file_0) + keypoints_1 = np.loadtxt(keypoint_file_1) + o=image_0.shape[1] + for m in matches_0_to_1[0:chunk.node.keepMatches.value]: + if m[0]>keypoints_0.shape[0]: + raise RuntimeError("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_0, m[0],keypoints_0.shape[0])) + if m[1]>keypoints_1.shape[0]: + raise RuntimeError("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_1, m[1],keypoints_1.shape[0])) + kp0 = keypoints_0[m[0]] + kp1 = keypoints_1[m[1]] + if chunk.node.matchOnly.value: + match_image=draw_keypoints(match_image, np.asarray( [(int(kp0[0]),int(kp0[1])), + (int(o+kp1[0]),int(kp1[1]))]) ) + cv2.line(match_image, (int(kp0[0]),int(kp0[1])), (int(o+kp1[0]),int(kp1[1])), color = [0,0,255]) + save_image(os.path.join(chunk.node.outputFolder.value, + "matches_"+view_id_0+"_"+view_id_1+".png"), match_image) + + chunk.logManager.end() + diff --git a/mrrs/deep_feature_matching/VizTracks.py b/mrrs/deep_feature_matching/VizTracks.py new file mode 100644 index 0000000..fb6b85b --- /dev/null +++ b/mrrs/deep_feature_matching/VizTracks.py @@ -0,0 +1,133 @@ +__version__ = "3.0" + +import os +import json + +import cv2 + +import numpy as np + +from meshroom.core import desc + +from mrrs.core.ios import * +from mrrs.core.geometry import * + +class VizTracks(desc.Node): + + category = 'Meshroom Research' + documentation = '''''' + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', 
+ value='', + uid=[0], + ), + + desc.File( + name='inputTracks', + label='inputTracks', + description='inputTracks', + value='', + uid=[0], + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="", + uid=[0], + ), + + desc.ChoiceParam( + name="describerTypes", + label="Describer Types", + description="Describer types to keep.", + value="dspsift", + values=["sift", "sift_float", "sift_upright", + "dspsift", "akaze", "akaze_liop", + "akaze_mldb", "cctag3", "cctag4", "sift_ocv", + "akaze_ocv", "tag16h5", "unknown"], + exclusive=True, + uid=[0] + ), + + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + value="info", + values=["fatal", "error", "warning", "info", "debug", "trace"], + exclusive=True, + uid=[], + ) + ] + + outputs = [ + desc.File( + name='outputFolder', + label='outputFolder', + description='outputFolder', + value=desc.Node.internalFolder, + uid=[], + group='', + ), + desc.File( + name='trackViz', + label='trackViz', + description='trackViz', + semantic='image', + value=os.path.join(desc.Node.internalFolder, 'tracks_.png'), + uid=[], + group='', + ), + ] + + def processChunk(self, chunk): + """ + """ + chunk.logManager.start(chunk.node.verboseLevel.value) + if chunk.node.inputSfM.value == '': + raise RuntimeError("No inputSfM specified") + + sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) + track_data=json.load(open(chunk.node.inputTracks.value,"r")) + + all_view_uids = [v["viewId"] for v in sfm_data["views"]] + uids_to_ids = {v:all_view_uids.index(v) for v in all_view_uids if v != ""} + + feature_files = [os.path.join(chunk.node.featureFolder.value, + uid+"."+chunk.node.describerTypes.value+".feat") for uid in all_view_uids] + features = [np.loadtxt(ff) for ff in feature_files] + + # for each image + for view in sfm_data["views"]: + image_path = view["path"] + image_uid = view["viewId"] + image = open_image(image_path) + #for all tracks + for track in track_data: + track_views = [str(v) for v,_ in track[1]["featPerView"]] + #only if the track is visible on the view + if image_uid not in track_views: + continue + #get the features and draw them on the reference image + feat_color = np.random.randint(0,255, size=3).tolist() + prev_feat=None + for view_uid, feature_id in track[1]["featPerView"]: + view_index = uids_to_ids[str(view_uid)] + feat = features[view_index][feature_id["featureId"]] + if prev_feat is not None: + image=cv2.line( image, (int(prev_feat[0]),int(prev_feat[1])), + (int(feat[0]),int(feat[1])), color = feat_color, + thickness=int(image.shape[0]/640)+1) + prev_feat=feat + + save_image(os.path.join(chunk.node.outputFolder.value, "tracks_"+image_uid+".png"),image) + + + chunk.logManager.end() + diff --git a/mrrs/deep_feature_matching/env.yaml b/mrrs/deep_feature_matching/env.yaml new file mode 100644 index 0000000..17d4a58 --- /dev/null +++ b/mrrs/deep_feature_matching/env.yaml @@ -0,0 +1,17 @@ +name: deepFeatures +channels: + - nvidia + - pytorch +dependencies: + - python==3.8 + - pytorch==1.13.1 + - pytorch-cuda==11.6 + - torchvision==0.14.1 + - conda-forge::kornia==0.7.0 + - pip + - pip: + - click + + + + diff --git a/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py b/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py new file mode 100644 index 0000000..84194b1 --- /dev/null +++ b/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py @@ -0,0 +1,96 @@ 
+import json +import os +import click +import numpy as np + +import kornia +import torch +from torch.nn.functional import pad + +from .utils import time_it, open_and_prepare_image, write_descriptor_file + +FEATURE_SIZE = 128 + +#todo add +# FEATURE_TYPES = ["DISK", "SIFTFeature", "SIFTFeatureScaleSpace", +# "GFTTAffNetHardNet", "KeyNetAffNetHardNet", "KeyNetHardNet"] +#todo add option to sort features + +@click.command() +@click.option('--inputSfMData', help='Input sfm data') +@click.option('--outputFolder', help='Output to store the results in') +@click.option('--method', type=click.Choice(["DISK", "SIFT"]), help="Feature extraction method") +@click.option('--maxKeypoints', type=click.INT, help='Will set the maximum nb of keyoint to maxKeypoints') +@click.option('--gridKeypoints', type=click.INT, help='maxKeypoints')#FIXME: TODO +@click.option('--verboseLevel', help='.')#FIXME: todo + +def run_extraction(inputsfmdata, outputfolder, method, maxkeypoints, gridkeypoints, verboselevel): + """ + run the feature detection and description + """ + #load sfmdata + print("Loading sfm data") + with open(inputsfmdata, "r") as json_file: + sfm_data = json.load(json_file) + nb_image = len(sfm_data["views"]) + + #init model + print("Loading model") + device = torch.device('cuda:0') + feature_model = None + if method == "DISK": + feature_model = kornia.feature.DISK.from_pretrained("depth").to(device)#or epipolar + elif method == "SIFT": + feature_model = kornia.feature.SIFTFeature(num_features=maxkeypoints, device=device) + else: + raise RuntimeError("Method no valid") + feature_model=feature_model.to(device) + + #loop over images + for view_index_0 in range(nb_image): + with time_it() as t: + timage_0, uid_image_0, _,_ = open_and_prepare_image(sfm_data, view_index_0, device, grayscale=False) + if method == "DISK": + #pad image to be divisible by 16 + image_size = np.asarray(timage_0.shape[2:4]) + new_image_size = (np.ceil(image_size/16)*16).astype(np.int32) + padding= new_image_size-image_size + if padding[0] !=0 or padding[1] != 0: + timage_0 = pad(timage_0, (0,0,padding[0],padding[1]) , value=0)#bad on right/bottom + #get features from image + window_size = 5 + score_threshold = 0 + with torch.no_grad(): + output = feature_model(timage_0, maxkeypoints) + keypoints=output[0].keypoints.cpu() + descriptors=output[0].descriptors.cpu() + #remove keypoints/descriptors in padding + outside = (keypoints[:,0]>=image_size[1]) | (keypoints[:,1]>=image_size[0]) + print("removing %d kp"%np.count_nonzero(outside)) + keypoints = keypoints[~outside] + descriptors = descriptors[~outside] + elif method == "SIFT": + timage_0 = kornia.color.rgb_to_grayscale(timage_0) + with torch.no_grad(): + output = feature_model(timage_0, maxkeypoints) + keypoints=output[0].cpu() + descriptors=output[2].cpu() + + #write all keypoints + kp_filename = os.path.join(outputfolder,uid_image_0+".unknown.feat") + print("Saving %d keypoints"%keypoints.shape[0]) + with open(kp_filename, "w") as kpf: + for kp_x, kp_y in keypoints: + kpf.write("%f %f 0 0\n"%(kp_x, kp_y)) + + # write descriptors as in aliceVision + # https://github.com/alicevision/AliceVision/blob/develop/src/aliceVision/feature/Descriptor.hpp#L255C13-L255C33 + desk_filename = os.path.join(outputfolder,uid_image_0+".unknown.desc") + #TODO: pad descrippr descriptors.shape[0] to FEATURE_SIZE + write_descriptor_file(descriptors, desk_filename) + + remaining = (nb_image-view_index_0-1)*float(t) + print("Extraction done in %fs (%d desc of size %d est remaining 
%fs/%fm)"%(t,descriptors.shape[0], descriptors.shape[1],remaining, remaining/60.0)) + +if __name__ == '__main__': + run_extraction() \ No newline at end of file diff --git a/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py new file mode 100644 index 0000000..91eefeb --- /dev/null +++ b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py @@ -0,0 +1,161 @@ +import json +import os +import sys + +import click +import numpy as np + +import kornia +import torch + +from .utils import time_it, open_image_grapĥ, open_descriptor_file + +@click.command() +@click.option('--inputSfMData', help='Input sfm data') +@click.option('--inputFeatureFolder', help='Input feature folder') +@click.option('--outputFolder', help='Output to store the results in') +@click.option('--imageMaching', default="all", help=("Method to select the views to be matched. 'all' will match all the views." + +"If a number is passed, will assume sequence and the number is going to be half window around a frame to compute the maches into" + +"If 'file' open the matches from the file in imagePairs")) +@click.option('--imagePairs', default="", help=("Image pair file to be used for the image matching")) +@click.option('--keepNmatches', default=0, type=int, help='If specified will keep the n first matches between views') +@click.option('--distanceThreshold', default=0.0, type=float, help='If specified will only keep the matches with at least this confidence') +@click.option('--verboseLevel', help='.')#FIXME: todo +def run_matching(inputsfmdata, outputfolder, inputfeaturefolder, + imagemaching, imagepairs, + keepnmatches, distancethreshold, + verboselevel): #note: lower caps + """ + """ + print("Hello") + + extention = "unknown" + feature_type = "disk"#FIXME: parameter + + def open_and_prepare_features(sfm_data, index, device): + uid_image = sfm_data["views"][index]["viewId"] + frame_id = int(sfm_data["views"][index]["frameId"]) + image_size = (int(sfm_data["views"][index]["width"]), + int(sfm_data["views"][index]["height"])) + keypoint_file=os.path.join(inputfeaturefolder, uid_image+"."+extention+".feat") + features=np.loadtxt(keypoint_file)[:,0:2] + descriptor_file=os.path.join(inputfeaturefolder, uid_image+"."+extention+".desc") + descritors = open_descriptor_file(descriptor_file) + features = torch.from_numpy(features).to(device).to(torch.float32) + descritors = torch.from_numpy(descritors).to(device).to(torch.float32) + return features, descritors, uid_image, frame_id, image_size + + #Load sfmdata + print("Loading sfm data") + with open(inputsfmdata, "r") as json_file: + sfm_data = json.load(json_file) + nb_image = len(sfm_data["views"]) + all_view_ids = [v["viewId"] for v in sfm_data["views"]] + + #opening imagematching file if any + if imagemaching == "file": + print("Opening imagepairs file:") + image_pairs=open_image_grapĥ(imagepairs, nb_image) + + #creates output folders + print("Creating output folder") + matches_folder = os.path.join(outputfolder, "matches") + os.makedirs(matches_folder, exist_ok=True) + + #init model + print("Loading model") + device = torch.device('cuda:0') + lightglue_model = kornia.feature.LightGlue(feature_type).to(device) + + print("Running matching") + with time_it() as total_time: + for view_index_0 in range(nb_image): + #open and prepare fearures for image 0 + features_0, descritors_0, uid_image_0, frame_id_0, image_size_0 = open_and_prepare_features(sfm_data, view_index_0, device) + + #depending of the 
matching method, we get a list of views to match + view_indices_1 = [] + if imagemaching.isnumeric(): + view_indices_1 = [i for i,view in enumerate(sfm_data["views"]) + if abs(int(view["frameId"])-frame_id_0)<=int(imagemaching)] + elif imagemaching == "all": + view_indices_1=range(nb_image) + elif imagemaching == "file": + view_uids_1 = image_pairs[view_index_0] + view_indices_1 = [all_view_ids.index(v) for v in view_uids_1 if v != ""] + elif imagemaching == "uni": + view_indices_1=range(view_index_0, nb_image) + else: + raise RuntimeError("Invalid imagemaching argument") + + with time_it() as t: + for view_index_1 in view_indices_1: + #if same image, skip + if view_index_0 == view_index_1: + continue + # print("\nMatches images %d to %d\n"%(view_index_0, view_index_1)) + + #open and prepare second image + features_1, descritors_1, uid_image_1, _, _ = open_and_prepare_features(sfm_data, view_index_1, device) + + if (descritors_0.shape[0] < 2) or (descritors_0.shape[0] < 2): + print("Not enough keypoints, skipping\n") + continue + + hw_0 = torch.Tensor(image_size_0) + hw_1 = torch.Tensor(image_size_0)#FIXME: assumes same image size + # print(features_0.shape) + # print(features_1.shape) + # print(descritors_0.shape) + # print(descritors_1.shape) + input_dict = { + "image0": { + "keypoints": torch.unsqueeze(features_0, dim=0), + "descriptors": torch.unsqueeze(descritors_0, dim=0), + "image_size": torch.unsqueeze(hw_0.to(device), dim=0), + }, + "image1": { + "keypoints": torch.unsqueeze(features_1, dim=0), + "descriptors": torch.unsqueeze(descritors_1, dim=0), + "image_size": torch.unsqueeze(hw_1.to(device), dim=0), + } + } + pred = lightglue_model(input_dict) + matches, distance = pred["matches"], pred["scores"] + + distance=distance[0].detach().cpu().numpy() + matches=matches[0].detach().cpu().numpy() + + #sort by confidence (distance descending) + order = np.argsort(distance) + matches=matches[order] + + # print(matches.shape) + + #if we dont define a max nb of match, will write all matches, otherwise will write only the n best matches + if keepnmatches == 0: + nb_to_write = matches.shape[0] + else: + nb_to_write = min(keepnmatches, matches.shape[0]) + + #if we passed confidenceThreshold, will find the index dynamically such that the remaining matches keep the trheshold + if distancethreshold !=0: + #will return index of first occurence of confidence bellow the threshold=> index when we stop + nb_to_write = np.argmin(distance feature_map_size[1]) or (y > feature_map_size[0]): + raise RuntimeError("Feature %f %f outside of feature map (%d %d) vs (%d %d)"%(X[0],X[1],x,y,feature_map_size[0], feature_map_size[1])) + linear_index = feature_map_size[1]*y+x + return linear_index + # for x,y in zip(all_keypoints_0_x, all_keypoints_0_y) : + # print(map_indices((x,y))) + # exit(0) + print("\nDone in %f seconds"%t) + + print("Running matching") + with time_it() as total_time: + for view_index_0 in range(nb_image): + #open and prepare + (timage_0, uid_image_0, + image_0, frame_id_0) = open_and_prepare_image(sfm_data,view_index_0, device) + + #depending of the matching method, we get a list of views to match + view_indices_1 = [] + if imagemaching.isnumeric(): + view_indices_1 = [i for i,view in enumerate(sfm_data["views"]) + if abs(int(view["frameId"])-frame_id_0)<=int(imagemaching)] + elif imagemaching == "all": + view_indices_1=range(nb_image) + elif imagemaching == "uni": + view_indices_1=range(view_index_0, nb_image) + elif imagemaching == "file": #FIXME: first index in list is the id of the 
view!!! + #get view from graph + # each line corresponds to an image in the same order as in the sfm? #FIXME: to check + #FIXME : non bijective matching matrix + view_uids_1 = image_pairs[view_index_0] + view_indices_1 = [all_view_ids.index(v) for v in view_uids_1 if v != ""] + else: + raise RuntimeError("Invalid imagemaching argument") + + with time_it() as t: + for view_index_1 in view_indices_1: + #if same image, skip + if view_index_0 == view_index_1: + continue + print("\nMatches images %d to %d\n"%(view_index_0, view_index_1)) + + #open and prepare second image + timage_1, uid_image_1, image_1, _ = open_and_prepare_image(sfm_data,view_index_1, device) + + #run loftr and get results + out = loftr_model({"image0": timage_0, "image1": timage_1}) + keypoints_0=out["keypoints0"].to('cpu').numpy() + keypoints_1=out["keypoints1"].to('cpu').numpy() + confidences=out["confidence"].to('cpu').numpy() + + nb_keypoint = keypoints_0.shape[0] + print("Found %d matches"%nb_keypoint) + + #sort by confidence (descending) + order = np.argsort(-confidences) + keypoints_0=keypoints_0[order] + keypoints_1=keypoints_1[order] + confidences=confidences[order] + + #if we keep the original matches + if not coarsematch: + #Write features on img 2 as brand new features + with open(os.path.join(feature_folder,uid_image_1+extention), "a+") as kpf: + for kp in keypoints_1: + kpf.write("%f %f 0 0\n"%(kp[0], kp[1])) + + #if masks defined + if len(masks) > 0: + mask_0 = masks[uid_image_0] + mask_1 = masks[uid_image_1] + nn_keypoints_0 = np.round(keypoints_0).astype(np.int32) + nn_keypoints_1 = np.round(keypoints_1).astype(np.int32) + mask_0_kp = mask_0[nn_keypoints_0[:,1], nn_keypoints_0[:,0],0] + mask_1_kp = mask_1[nn_keypoints_1[:,1], nn_keypoints_1[:,0],0] + valid_kp = mask_0_kp&mask_1_kp + #remove masked keypoints + keypoints_0 = keypoints_0[valid_kp,:] + keypoints_1 = keypoints_1[valid_kp,:] + confidences = confidences[valid_kp] + nb_keypoint = keypoints_0.shape[0] + print("%d matches after masking"%nb_keypoint) + + if coarsematch: + #FIXME: not elegant, better get the index of the match from loftr + #removes the duplicate indices, can happen if the refine move the keypoint outside the initial patch + # keypoint_0_index_matched = {} + # keypoint_0_indices = [map_indices(k) for k in keypoints_0] + keypoint_1_indices = [map_indices(k) for k in keypoints_1] + keypoint_1_index_matched = {} + to_del = [] + print("%d unique match found"%np.unique(keypoint_1_indices).shape[0]) + for kp_indx in range(nb_keypoint): + keypoint_1_index=keypoint_1_indices[kp_indx] + # keypoint_0_index=keypoint_0_indices[kp_indx] + if keypoint_1_index in keypoint_1_index_matched.keys(): + # print("Keypoint %f %f already matched with %f %f with higher confidence, discarding"%(keypoints_1[kp_indx][0], + # keypoints_1[kp_indx][1], + # keypoint_1_index_matched[keypoint_1_index][0], + # keypoint_1_index_matched[keypoint_1_index][1] + # )) + to_del.append(kp_indx) + else: + keypoint_1_index_matched[keypoint_1_index]=keypoints_1[kp_indx] + print("Found %d duplicates, removing"%len(to_del)) + keypoints_0=np.delete(keypoints_0,to_del, axis=0) + keypoints_1=np.delete(keypoints_1,to_del, axis=0) + confidences=np.delete(confidences,to_del, axis=0) + nb_keypoint = keypoints_0.shape[0] + + #if we dont define a max nb of match, will write all matches, otherwise will write only the n best matches + if keepnmatches == 0: + nb_to_write = nb_keypoint + else: + nb_to_write = min(keepnmatches, nb_keypoint) + #if we passed confidenceThreshold, will find the index 
dynamically such that the remaining matches keep the trheshold + if confidencethreshold != 0: + #will return index of first occurence of confidence bellow the threshold=> index when we stop + nb_to_write = np.argmin(confidences>confidencethreshold) + + keypoint_0_indices = [map_indices(k) for k in keypoints_0] + keypoint_1_indices = [map_indices(k) for k in keypoints_1] + + print("Writting %d matches"%nb_to_write) + #Write matches, note "0." beacause mewhroom suports several matches files for batching + with open(os.path.join(matches_folder,"0.matches.txt"), "a+") as mf: + mf.write("%s %s\n"%(uid_image_0, uid_image_1)) + mf.write("1\n") + mf.write("sift %d\n"%(nb_to_write))#for now we disuise as sift + for kp_indx in range(nb_to_write):#save feature index with offset for each view + # print("%d/%d"%(kp_indx, nb_to_write)) + keypoint_0_index = keypoint_0_indices[kp_indx]#retrieve index in the pre-written features + if not coarsematch:#if we keep the normal matches + keypoint_1_index = kp_indx+nb_features[view_index_1]#index is offsetted by the allready written features + else: + keypoint_1_index = keypoint_1_indices[kp_indx] + mf.write("%d %d\n"%(keypoint_0_index, keypoint_1_index)) + if not coarsematch: + nb_features[view_index_1]+=nb_keypoint#nb_to_write + # exit(0) + print("Matches for view %d/%d done for %d views, in %fs (est. remaining if constant %fm)"%(view_index_0+1, + nb_image, len(view_indices_1)-1, t, (nb_image-view_index_0)*float(t)/60.0), end="\n") + print("\n") + print("Matching done in %fs"%total_time) + +if __name__ == '__main__': + run_matching() \ No newline at end of file diff --git a/mrrs/deep_feature_matching/kornia_wrappers/utils.py b/mrrs/deep_feature_matching/kornia_wrappers/utils.py new file mode 100644 index 0000000..9f7102f --- /dev/null +++ b/mrrs/deep_feature_matching/kornia_wrappers/utils.py @@ -0,0 +1,112 @@ +from PIL import Image +import numpy as np +import struct + +import kornia + +def open_and_prepare_image(sfm_data, index, device, grayscale=True): + """ + Opens and prepare an image tensor from sfm data + """ + + image_0 = np.asarray(Image.open(sfm_data["views"][index]["path"]))#FIXME: replace will call to open_image + image_0 = image_0[:,:,0:3] + uid_image_0 = sfm_data["views"][index]["viewId"] + frame_id = int(sfm_data["views"][index]["frameId"]) + timage_0=kornia.utils.image_to_tensor(image_0, False).float() / 255. + if grayscale: + timage_0 = kornia.color.rgb_to_grayscale(timage_0) + timage_0=timage_0.to(device) + return timage_0, uid_image_0, image_0, frame_id + + +#FIXME: call to mrrs, see with kelian conda node +import time +class time_it(): + """ + Context class to measure elapsed time. + Can be cast to float. 
+ """ + def __init__(self): + self.start_time = np.nan + self.end_time = np.nan + def __enter__(self): + self.start_time = time.time() + return self + def __exit__(self, type, value, traceback): + self.end_time= time.time() + def __float__(self): + return float(self.end_time- self.start_time) + def __coerce__(self, other): + return (float(self), other) + def __str__(self): + return str(float(self)) + def __repr__(self): + return str(float(self)) + +#FIXME: all of this should be in core +def open_image_grapĥ(imagepairs, nb_image): + with open(imagepairs, 'r') as matchfile: + matches_raw = matchfile.readlines() + #one line per image + image_pairs = [line.strip().split(" ") for line in matches_raw] + if len(image_pairs) != nb_image: + if len(image_pairs) == nb_image-1:#file is not properly written in AV, if last image no match, no \n + image_pairs.append("") + else: + raise RuntimeError("Malformed image match file, %d vs %d images"%(len(image_pairs), nb_image-1)) + return image_pairs + +def open_descriptor_file(descriptor_file): + with open(descriptor_file, "rb") as df: + #read number of desc from first byte + nb_desv_encoded = struct.unpack('N', df.read(struct.calcsize('N')))[0] + #read rematinign floats + descriptors = np.asarray(list(struct.iter_unpack('f', df.read()))) + descriptors=np.reshape(descriptors, (nb_desv_encoded, -1)) + return descriptors + +def write_descriptor_file(descriptors, desk_filename): + with open(desk_filename, "wb") as df: + #nb of desc, as size_t (should be 1 byte) + nb_desv_encoded = struct.pack('N', int(descriptors.shape[0])) + df.write(nb_desv_encoded) + for descriptor in descriptors:#write descriptor as floats (4 bytes) + for d in descriptor: + d=struct.pack('f', d) + df.write(d) + +def parse_line(matches): + result = [m.strip() for m in matches.readline().split(" ")] + if len(result) == 1: + if result[0] == "": + return None + result = result[0] + return result + +def open_matches(match_file): + with open(match_file, "r") as match_file: + match_data = {} + while True: + view_ids = parse_line(match_file) + if view_ids is None: + break + view_id_0, view_id_1 = view_ids + nb_type_feat = parse_line(match_file) + if nb_type_feat != "1": + raise RuntimeError("Only supports one descriptor type at the time") + type_feat, nb_match = parse_line(match_file) + nb_match = int(nb_match) + matches_raw = [match_file.readline() for _ in range(nb_match)] + #avoid the squeeze when onlly one match + if len(matches_raw) == 1: + matches = np.expand_dims(np.loadtxt(matches_raw).astype(np.int32), axis=0) + else: + matches = np.loadtxt(matches_raw).astype(np.int32) + if matches.shape[0] != nb_match: + raise RuntimeError("Unexpected number of matches for view %s %d vs %d"%(view_id_0, matches.shape[0], nb_match)) + #save result + if not (view_id_0 in match_data.keys()): + match_data[view_id_0]={} + match_data[view_id_0][view_id_1] = matches + return match_data \ No newline at end of file From c5a1fadccde07289ceac4cde2e1aa1886333eaa2 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 10 Jun 2024 15:05:30 +0200 Subject: [PATCH 11/32] changed blender folder --- README.md | 5 +++-- meshroomPlugin.json | 4 ---- mrrs/3DR_benchmark/CalibrationComparison.py | 13 ++++++++++--- mrrs/3DR_benchmark/DepthMapComparison.py | 3 +++ mrrs/3DR_benchmark/LoadDataset.py | 8 +++++--- mrrs/3DR_benchmark/datasets/__init__.py | 3 +++ .../metrics/chamfer_distance/clean_mesh.py | 5 ++--- .../chamfer_distance/remove_invisible_faces.py | 5 ----- mrrs/blender/{render => }/CreateTrackingMarkers.py | 0 
mrrs/blender/{render => }/Render360.py | 0 mrrs/blender/{render => }/RenderMesh.py | 0 mrrs/blender/{render => }/RenderOverlay.py | 0 mrrs/blender/{render => }/SyntheticDataset.py | 0 mrrs/blender/__init__.py | 8 ++++++++ mrrs/blender/render/__init__.py | 8 -------- mrrs/gaussian_splatting/GaussianSplatting.py | 2 -- mrrs/nerf/nerfstudio.py | 1 - setup.py | 4 ++-- 18 files changed, 36 insertions(+), 33 deletions(-) rename mrrs/blender/{render => }/CreateTrackingMarkers.py (100%) rename mrrs/blender/{render => }/Render360.py (100%) rename mrrs/blender/{render => }/RenderMesh.py (100%) rename mrrs/blender/{render => }/RenderOverlay.py (100%) rename mrrs/blender/{render => }/SyntheticDataset.py (100%) delete mode 100644 mrrs/blender/render/__init__.py diff --git a/README.md b/README.md index 5a3d783..26f9f71 100644 --- a/README.md +++ b/README.md @@ -91,10 +91,11 @@ pip install -e ./MeshroomResearch Contributions to Meshroom-Research are welcomed! Here's a quick overview of the project structure: -- `mrrs/core`: The library side of MRRS, it contains basic IOs, utilities, and common geometrical functions to be used in other plugins. +- `mrrs/core`: The library side of MRRS, it contains basic IOs, utilities, and common geometrical functions to be used in other plugins. /!\ Your plugin needs to handle the install and the dependencies. - `mrrs/`: Contains the code and the nodes related to a plugin feature. - `mrrs/meshrooPlugin.json`: Contains the list of plugins in this collection. +Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced new types of nodes (e.g. CondaNode and DockerNode), which automate environment management for your convenience. + See meshroom's [plugin documentation](https://github.com/alicevision/Meshroom/tree/dev/plugin_system/meshroom/core) to learn how to make your own plugins. -Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom's repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced new types of node (eg. CondaNode and DockerNode), which automates environment management for your convenience.
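To make the plugin layout described above concrete, the sketch below shows the general shape of a node in this collection, pieced together from the patterns used throughout this patch series (a CondaNode subclass, a commandLine template expanded with {allParams}, an envFile pointing at a conda environment spec, and desc parameters). It is only an illustrative sketch: the class name, category, script name and environment file are hypothetical, not files from the repository.

    import os
    from meshroom.core import desc
    from meshroom.core.plugin import CondaNode

    class ExamplePluginNode(CondaNode):
        # Hypothetical node, for illustration only.
        category = 'MRRS - Example'
        documentation = '''Minimal sketch of a CondaNode-based plugin node.'''

        # The wrapped CLI receives every declared parameter through {allParams}.
        commandLine = 'python "' + os.path.join(os.path.dirname(__file__), "example_cli.py") + '" {allParams}'
        # Conda environment the command line is executed in.
        envFile = os.path.join(os.path.dirname(__file__), "example_env.yaml")

        inputs = [
            desc.File(
                name='inputSfM',
                label='Input SfM',
                description='Input sfmData file.',
                value='',
            ),
        ]
        outputs = [
            desc.File(
                name='outputFolder',
                label='Output Folder',
                description='Folder for generated results.',
                value=desc.Node.internalFolder,
            ),
        ]

A matching entry in meshroomPlugin.json then only has to point "nodesFolder" at the folder containing such classes, as the diffs to that file below illustrate.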
diff --git a/meshroomPlugin.json b/meshroomPlugin.json index ace80a7..1c655ed 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -1,8 +1,4 @@ [ - { - "pluginName":"MRRS", - "nodesFolder":"mrrs/core" - }, { "pluginName":"3DR Benchmark", "nodesFolder":"mrrs/3DR_benchmark", diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index e412cbf..5507050 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -3,19 +3,26 @@ """ __version__ = "3.0" +import logging import os import json +import numpy as np + from meshroom.core import desc -from mrrs.core.ios import * -from .metrics.metrics import * +from meshroom.core.plugin import CondaNode +from mrrs.core.ios import matrices_from_sfm_data +from .metrics.metrics import compute_calib_metric + -class CalibrationComparison(desc.Node): +class CalibrationComparison(CondaNode): category = 'MRRS - Benchmark' documentation = '''For each camera, compare its estimated parameters with a given groud truth.''' + envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + inputs = [ desc.File( name='inputSfM', diff --git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index 23b60db..1301212 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -21,6 +21,9 @@ class DepthMapComparison(desc.Node): Autorescale may be used otherwise but it is far from ideal. ''' + envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + + inputs = [ desc.File( name='inputSfM', diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index 989aeee..f2c7598 100644 --- a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -7,18 +7,20 @@ from meshroom.core import desc import trimesh -from mrrs.core.geometry import * -from mrrs.core.ios import * +from mrrs.core.geometry import camera_projection, random_sample_points_mesh_2, transform_cg_cv +from mrrs.core.ios import open_depth_map, open_image, save_exr, save_image, sfm_data_from_matrices from .datasets import load_dataset #FIXME:move this into a command line node? +# Pros: can install mrrs via condanode +# Cons: no debugging for us, or just switch Node to CondaNode? quid processchunk? 
class LoadDataset(desc.Node): category = 'MRRS - Benchmark' documentation = '''Util node to open datasets with different data from the images in the .sfm''' - size = desc.DynamicNodeSize('sfmData') + envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") inputs = [ diff --git a/mrrs/3DR_benchmark/datasets/__init__.py b/mrrs/3DR_benchmark/datasets/__init__.py index 6ee71d1..784dea2 100644 --- a/mrrs/3DR_benchmark/datasets/__init__.py +++ b/mrrs/3DR_benchmark/datasets/__init__.py @@ -60,3 +60,6 @@ def load_dataset(sfm_data, dataset_type): print("Sensor size set to default (35mm)") return data, sfm_data + +if __name__ == "__main__": + pass \ No newline at end of file diff --git a/mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py index aec1a93..34bef1e 100644 --- a/mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py +++ b/mrrs/3DR_benchmark/metrics/chamfer_distance/clean_mesh.py @@ -6,10 +6,9 @@ import argparse import numpy as np -import open3d as o3d -import igl - if __name__ == '__main__': + import open3d as o3d + import igl # Input arguments parser = argparse.ArgumentParser() diff --git a/mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py b/mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py index 1be9c01..8b90e89 100644 --- a/mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py +++ b/mrrs/3DR_benchmark/metrics/chamfer_distance/remove_invisible_faces.py @@ -6,11 +6,6 @@ import numpy as np import trimesh -# FIXME: not sure why i cannot do that in the yaml -mrrs_path=os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) -print("mrrs path "+mrrs_path) -import sys -sys.path.insert(0, mrrs_path) from mrrs.core.ios import open_image from mrrs.core.utils import listdir_fullpath diff --git a/mrrs/blender/render/CreateTrackingMarkers.py b/mrrs/blender/CreateTrackingMarkers.py similarity index 100% rename from mrrs/blender/render/CreateTrackingMarkers.py rename to mrrs/blender/CreateTrackingMarkers.py diff --git a/mrrs/blender/render/Render360.py b/mrrs/blender/Render360.py similarity index 100% rename from mrrs/blender/render/Render360.py rename to mrrs/blender/Render360.py diff --git a/mrrs/blender/render/RenderMesh.py b/mrrs/blender/RenderMesh.py similarity index 100% rename from mrrs/blender/render/RenderMesh.py rename to mrrs/blender/RenderMesh.py diff --git a/mrrs/blender/render/RenderOverlay.py b/mrrs/blender/RenderOverlay.py similarity index 100% rename from mrrs/blender/render/RenderOverlay.py rename to mrrs/blender/RenderOverlay.py diff --git a/mrrs/blender/render/SyntheticDataset.py b/mrrs/blender/SyntheticDataset.py similarity index 100% rename from mrrs/blender/render/SyntheticDataset.py rename to mrrs/blender/SyntheticDataset.py diff --git a/mrrs/blender/__init__.py b/mrrs/blender/__init__.py index e69de29..6d4ee4c 100644 --- a/mrrs/blender/__init__.py +++ b/mrrs/blender/__init__.py @@ -0,0 +1,8 @@ +from shutil import which + +if which('blender') is None: + print( + "[warning] mrrs: 'blender' command not found, the following nodes cannot be computed: \n", + "* SyntheticDataset \n", + "* RenderOverlay \n", + ) diff --git a/mrrs/blender/render/__init__.py b/mrrs/blender/render/__init__.py deleted file mode 100644 index 6d4ee4c..0000000 --- a/mrrs/blender/render/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from shutil import which - -if which('blender') is None: - print( - "[warning] mrrs: 'blender' command not found, the 
following nodes cannot be computed: \n", - "* SyntheticDataset \n", - "* RenderOverlay \n", - ) diff --git a/mrrs/gaussian_splatting/GaussianSplatting.py b/mrrs/gaussian_splatting/GaussianSplatting.py index 28adcd4..5d0be52 100644 --- a/mrrs/gaussian_splatting/GaussianSplatting.py +++ b/mrrs/gaussian_splatting/GaussianSplatting.py @@ -4,8 +4,6 @@ from meshroom.core.plugin import DockerNode from distutils.dir_util import copy_tree - -from collections import namedtuple from trimesh.exchange.ply import _parse_header, _ply_binary import numpy as np diff --git a/mrrs/nerf/nerfstudio.py b/mrrs/nerf/nerfstudio.py index 2277ff6..aad546d 100644 --- a/mrrs/nerf/nerfstudio.py +++ b/mrrs/nerf/nerfstudio.py @@ -1,7 +1,6 @@ import json import os import numpy as np -import cv2 import shutil from meshroom.core import desc diff --git a/setup.py b/setup.py index f1afe86..6729cae 100644 --- a/setup.py +++ b/setup.py @@ -17,5 +17,5 @@ install_requires=["numpy", "pillow", "opencv-python-headless", "trimesh", "click"], - # "oiio-python"], #FIXME: no pypy pacjages! need conda env or external install - ) \ No newline at end of file + # "oiio-python" py-openimageio], #FIXME: no pypy pacjages! need conda env or external install + ) \ No newline at end of file From b150a725ed144047bcfe9afb5a48b2551db610e5 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 17 Jun 2024 14:32:37 +0200 Subject: [PATCH 12/32] calibration comparison cli --- meshroomPlugin.json | 14 +-- mrrs/3DR_benchmark/CalibrationComparison.py | 103 ++++------------- mrrs/3DR_benchmark/calibration_comparison.py | 104 ++++++++++++++++++ mrrs/3DR_benchmark/general_env.yaml | 14 +++ mrrs/deep_feature_matching/MaskFeatures.py | 2 +- .../kornia_wrappers/light_glue_matcher.py | 2 - 6 files changed, 145 insertions(+), 94 deletions(-) create mode 100644 mrrs/3DR_benchmark/calibration_comparison.py create mode 100644 mrrs/3DR_benchmark/general_env.yaml diff --git a/meshroomPlugin.json b/meshroomPlugin.json index 1c655ed..cfdf7ae 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -1,11 +1,11 @@ [ { - "pluginName":"3DR Benchmark", + "pluginName":"3DR_Benchmark", "nodesFolder":"mrrs/3DR_benchmark", "pipelineFolder":"mrrs/3DR_benchmark/pipelines" }, { - "pluginName":"Gaussian Splatting", + "pluginName":"Gaussian_Splatting", "nodesFolder":"mrrs/gaussian_splatting" }, { @@ -14,20 +14,20 @@ "pipelineFolder":"mrrs/colmap/pipelines" }, { - "pluginName":"Deep Depth Maps", + "pluginName":"Deep_Depth_Maps", "nodesFolder":"mrrs/deep_depth_map" }, { - "pluginName":"Nerfstudio", - "nodesFolder":"mrrs/nerfstudio" + "pluginName":"Nerf_studio", + "nodesFolder":"mrrs/nerf" }, { - "pluginName":"Reality Capture", + "pluginName":"Reality_Capture", "nodesFolder":"mrrs/reality_capture", "pipelineFolder":"mrrs/reality_capture/pipelines" }, { - "pluginName":"Stereo Photometry", + "pluginName":"Stereo_Photometry", "nodesFolder":"mrrs/stereo_photometry" }, { diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index 5507050..f8a75d0 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -3,24 +3,19 @@ """ __version__ = "3.0" -import logging -import os -import json -import numpy as np +import os from meshroom.core import desc - from meshroom.core.plugin import CondaNode -from mrrs.core.ios import matrices_from_sfm_data -from .metrics.metrics import compute_calib_metric - class CalibrationComparison(CondaNode): category = 'MRRS - Benchmark' documentation = '''For each 
camera, compare its estimated parameters with a given groud truth.''' - + + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "calibration_comparison.py")+'" {allParams}' + envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") inputs = [ @@ -49,7 +44,7 @@ class CalibrationComparison(CondaNode): ), desc.StringParam( - name='csv_name', + name='csvName', label='CsvName', description='Name for the csv file to be used.', value="calibration_comparison.csv", @@ -70,13 +65,19 @@ class CalibrationComparison(CondaNode): name='outputFolder', label='Output Folder', description='Output folder for generated results.', + group="", value=desc.Node.internalFolder, ), desc.File( name='outputCsv', label='Output Csv', description='Output file to generated results.', - value=lambda attr: os.path.join(desc.Node.internalFolder, attr.node.csv_name.value), + value=lambda attr: os.path.join(desc.Node.internalFolder, attr.node.csvName.value), ) ] @@ -85,10 +86,10 @@ def check_inputs(self, chunk): Checks that all inputs are properly set. """ if not chunk.node.inputSfM.value: - chunk.logger.warning('No inputSfM in node DepthMapComparison, skipping') + chunk.logger.warning('No inputSfM in node, skipping') return False if not chunk.node.inputSfMGT.value: - chunk.logger.warning('No inputSfMGT in node DepthMapComparison, skipping') + chunk.logger.warning('No inputSfMGT in node, skipping') return False return True @@ -96,75 +97,9 @@ def processChunk(self, chunk): """ Computes the different metrics on the input and groud truth depth maps. """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - #open inputs - if not self.check_inputs(chunk): - return - - sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) - sfm_data_gt=json.load(open(chunk.node.inputSfMGT.value,"r")) - views_ids = [view["viewId"] for view in sfm_data["views"]] - views_ids_gt = [view["viewId"] for view in sfm_data_gt["views"]] - if len(views_ids_gt) != len(views_ids): - raise RuntimeError("Mismatching number of views (%d vs %d"%(len(views_ids), len(views_ids_gt))) - #getting calib in matrix form (along with id) - extrinsics, intrinsics, poses_id, intrinsics_id, _, _ = matrices_from_sfm_data(sfm_data) - extrinsics_gt, intrinsics_gt, poses_id_gt, intrinsics_id_gt, _, _ = matrices_from_sfm_data(sfm_data_gt) - - chunk.logger.info('Computing metrics for %d calibrations'%len(sfm_data['views'])) - metrics = chunk.node.metrics.value - #compute metrics - computed_metric_values = [] - for index, (view_id, extrinsic, intrinsic) in enumerate(zip(views_ids, extrinsics, intrinsics)): - if (extrinsic is None) or (intrinsic is None): - logging.warning("Calibration view "+view_id+" was not computed (likely because the SfM was not able to compute a pose)") - computed_metric_values.append([0 if m=="validCams" else np.nan for m in metrics]) - continue - #retrieve corresponding GT from id - index_gt = np.where(view_id==np.asarray(views_ids_gt)) - if index_gt[0].size == 0: - logging.warning("View "+view_id+" not present in groud truth sfm, skipping") - continue - # chunk.logger.info('Computing metrics for view %d/%d (%s,%s)'%(index, len(views_ids),view_id,views_ids_gt[index_gt[0][0]])) - index_gt = index_gt[0][0]#FIXME: sanity check more than 1 - extrinsic_gt = extrinsics_gt[index_gt] - intrinsic_gt = intrinsics_gt[index_gt] - #metric computation - metric_values = [] - for metric in metrics: - chunk.logger.info("Computing "+metric) - metric_value = 
compute_calib_metric(metric, extrinsic, intrinsic, extrinsic_gt, intrinsic_gt) - chunk.logger.info(str(metric_value)) - metric_values.append(metric_value) - computed_metric_values.append(metric_values) - #stack up and compute average on dataset - computed_metric_values = np.asarray(computed_metric_values) - average_metric_values = np.nanmean(computed_metric_values, axis=0) - median_metric_values = np.nanmedian(computed_metric_values, axis=0) - #write output file - os.makedirs(chunk.node.outputFolder.value, exist_ok=True) - - with open(chunk.node.outputCsv.value, "w") as csv_file: - #header - csv_file.write("View,") - for metric in metrics: - csv_file.write(metric+",") - csv_file.write("\n") - #values - for view_id, metric_values in zip(views_ids, computed_metric_values): - csv_file.write(view_id+",") - for metric_value in metric_values: - csv_file.write("%f,"%metric_value) - csv_file.write("\n") - #average and median value - csv_file.write("average,") - for average_metric_value in average_metric_values: - csv_file.write("%f,"%average_metric_value) - csv_file.write("\n") - csv_file.write("median,") - for median_metric_value in median_metric_values: - csv_file.write("%f,"%median_metric_value) - chunk.logger.info('Calib comparison ends') - finally: - chunk.logManager.end() + chunk.logManager.start(chunk.node.verboseLevel.value) + if not self.check_inputs(chunk): + raise RuntimeError("Missing arguments") + super().processChunk(chunk) + chunk.logger.info('Calib comparison ends') + chunk.logManager.end() diff --git a/mrrs/3DR_benchmark/calibration_comparison.py b/mrrs/3DR_benchmark/calibration_comparison.py new file mode 100644 index 0000000..f10d265 --- /dev/null +++ b/mrrs/3DR_benchmark/calibration_comparison.py @@ -0,0 +1,104 @@ +import json +import logging +import argparse +import os + +import numpy as np + +from mrrs.core.ios import matrices_from_sfm_data +from metrics.metrics import compute_calib_metric + + +parser = argparse.ArgumentParser() +parser.add_argument( + '-i', '--inputSfM', + help="", +) +parser.add_argument( + '-g', '--inputSfMGT', + help="", +) +parser.add_argument( + '-o', '--outputCsv', + help="", +) +parser.add_argument( + '-m', '--metrics', + help="", +) + +parser.add_argument( + '-v', '--verboseLevel', + help="Verbose level", +) +args = parser.parse_args() + + +input_sfm=args.inputSfM +gt_sfm=args.inputSfMGT +metrics=args.metrics.split(",") +outputCsv=args.outputCsv + +sfm_data=json.load(open(input_sfm,"r")) +sfm_data_gt=json.load(open(gt_sfm,"r")) +views_ids = [view["viewId"] for view in sfm_data["views"]] +views_ids_gt = [view["viewId"] for view in sfm_data_gt["views"]] +if len(views_ids_gt) != len(views_ids): + raise RuntimeError("Mismatching number of views (%d vs %d"%(len(views_ids), len(views_ids_gt))) +#getting calib in matrix form (along with id) +extrinsics, intrinsics, poses_id, intrinsics_id, _, _ = matrices_from_sfm_data(sfm_data) +extrinsics_gt, intrinsics_gt, poses_id_gt, intrinsics_id_gt, _, _ = matrices_from_sfm_data(sfm_data_gt) + +logging.info('Computing metrics for %d calibrations'%len(sfm_data['views'])) + +#compute metrics +computed_metric_values = [] +for index, (view_id, extrinsic, intrinsic) in enumerate(zip(views_ids, extrinsics, intrinsics)): + if (extrinsic is None) or (intrinsic is None): + logging.warning("Calibration view "+view_id+" was not computed (likely because the SfM was not able to compute a pose)") + computed_metric_values.append([0 if m=="validCams" else np.nan for m in metrics]) + continue + #retrieve corresponding GT from id + 
index_gt = np.where(view_id==np.asarray(views_ids_gt)) + if index_gt[0].size == 0: + logging.warning("View "+view_id+" not present in groud truth sfm, skipping") + continue + # logging.info('Computing metrics for view %d/%d (%s,%s)'%(index, len(views_ids),view_id,views_ids_gt[index_gt[0][0]])) + index_gt = index_gt[0][0]#FIXME: sanity check more than 1 + extrinsic_gt = extrinsics_gt[index_gt] + intrinsic_gt = intrinsics_gt[index_gt] + #metric computation + metric_values = [] + for metric in metrics: + logging.info("Computing "+metric) + metric_value = compute_calib_metric(metric, extrinsic, intrinsic, extrinsic_gt, intrinsic_gt) + logging.info(str(metric_value)) + metric_values.append(metric_value) + computed_metric_values.append(metric_values) +#stack up and compute average on dataset +computed_metric_values = np.asarray(computed_metric_values) +average_metric_values = np.nanmean(computed_metric_values, axis=0) +median_metric_values = np.nanmedian(computed_metric_values, axis=0) +#write output file +os.makedirs(os.path.dirname(outputCsv), exist_ok=True) + +with open(outputCsv, "w") as csv_file: + #header + csv_file.write("View,") + for metric in metrics: + csv_file.write(metric+",") + csv_file.write("\n") + #values + for view_id, metric_values in zip(views_ids, computed_metric_values): + csv_file.write(view_id+",") + for metric_value in metric_values: + csv_file.write("%f,"%metric_value) + csv_file.write("\n") + #average and median value + csv_file.write("average,") + for average_metric_value in average_metric_values: + csv_file.write("%f,"%average_metric_value) + csv_file.write("\n") + csv_file.write("median,") + for median_metric_value in median_metric_values: + csv_file.write("%f,"%median_metric_value) diff --git a/mrrs/3DR_benchmark/general_env.yaml b/mrrs/3DR_benchmark/general_env.yaml new file mode 100644 index 0000000..fd4d02a --- /dev/null +++ b/mrrs/3DR_benchmark/general_env.yaml @@ -0,0 +1,14 @@ +name: 3drBench +channels: + - conda-forge + - open3d-admin + - defaults +dependencies: + - python #=3.9 + - numpy + - openimageio + - py-openimageio + - pip + - pip: + #- git+https://github.com/alicevision/MeshroomResearch.git + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # diff --git a/mrrs/deep_feature_matching/MaskFeatures.py b/mrrs/deep_feature_matching/MaskFeatures.py index 3b43ef2..820f108 100644 --- a/mrrs/deep_feature_matching/MaskFeatures.py +++ b/mrrs/deep_feature_matching/MaskFeatures.py @@ -7,7 +7,7 @@ from meshroom.core import desc from mrrs.core.ios import * -from .utils import open_descriptor_file, write_descriptor_file +from .kornia_wrappers.utils import open_descriptor_file, write_descriptor_file class MaskFeatures(desc.Node): diff --git a/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py index 91eefeb..deef556 100644 --- a/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py +++ b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py @@ -1,7 +1,5 @@ import json import os -import sys - import click import numpy as np From d67130534553411dd1289ad4b65c7e510faaabf5 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 17 Jun 2024 17:19:05 +0200 Subject: [PATCH 13/32] fixed reality capture for new plugin system --- mrrs/reality_capture/ExportXMP.py | 51 ++----------- mrrs/reality_capture/ImportXMP.py | 50 ++----------- mrrs/reality_capture/env.yaml | 14 ++++ .../reality_capture.py | 75 +++++++++++++++---- 4 files changed, 90 insertions(+), 100 deletions(-) 
create mode 100644 mrrs/reality_capture/env.yaml rename mrrs/{3DR_benchmark/datasets => reality_capture}/reality_capture.py (66%) diff --git a/mrrs/reality_capture/ExportXMP.py b/mrrs/reality_capture/ExportXMP.py index 0ff086f..3c6760c 100644 --- a/mrrs/reality_capture/ExportXMP.py +++ b/mrrs/reality_capture/ExportXMP.py @@ -3,21 +3,21 @@ """ __version__ = "3.0" -import json -import os +import os from meshroom.core import desc +from meshroom.core.plugin import CondaNode -from mrrs.core.ios import get_image_sizes, matrices_from_sfm_data -from mrrs.datasets.reality_capture import export_reality_capture - - -class ExportXMP(desc.Node): +class ExportXMP(CondaNode): category = 'Meshroom Research' documentation = '''Node to create an XMP file from camera calibration.''' + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "reality_capture.py")+'" exportxmp {sfmDataValue} {outputFolderValue} ' + + envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + inputs = [ desc.ChoiceParam( @@ -55,40 +55,3 @@ class ExportXMP(desc.Node): ), ] - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if chunk.node.sfmData.value=='': - chunk.logger.warning('No sfmData, skipping') - return False - return True - - def processChunk(self, chunk): - """ - Opens the dataset data. - """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts to load data from sfmdata") - sfm_data = json.load(open(chunk.node.sfmData.value, "r")) - - (extrinsics_all_cams, intrinsics_all_cams, views_id, - poses_id, intrinsics_id, pixel_sizes_all_cams) = matrices_from_sfm_data(sfm_data) - image_sizes = get_image_sizes(sfm_data) - chunk.logManager.start("Exporting calibration") - images_names = [os.path.basename(view["path"])[:-4] for view in sfm_data["views"]] - for image_name, extrinsics, intrinsics, pixel_size, image_size in zip(images_names, extrinsics_all_cams, - intrinsics_all_cams, pixel_sizes_all_cams, image_sizes): - if extrinsics is not None: - xmp_file = os.path.join(chunk.node.outputFolder.value, image_name+".xmp") - export_reality_capture(xmp_file, extrinsics, intrinsics, pixel_size,image_size ) - - chunk.logger.info('XMP export ends') - finally: - chunk.logManager.end() - - - diff --git a/mrrs/reality_capture/ImportXMP.py b/mrrs/reality_capture/ImportXMP.py index 582df6b..b6e7f22 100644 --- a/mrrs/reality_capture/ImportXMP.py +++ b/mrrs/reality_capture/ImportXMP.py @@ -3,20 +3,21 @@ """ __version__ = "3.0" -import json -import os -import shutil from meshroom.core import desc -from mrrs.core.ios import sfm_data_from_matrices -from mrrs.datasets.reality_capture import import_xmp, SENSOR_SIZE +from meshroom.core.plugin import CondaNode +import os -class ImportXMP(desc.Node): +class ImportXMP(CondaNode): category = 'Meshroom Research' documentation = '''Node to import a camera calibration from an XMP''' + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "reality_capture.py")+'" importxmp {sfmDataValue} {xmpDataValue} {outputSfMDataValue}' + + envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + inputs = [ desc.File( @@ -58,40 +59,3 @@ class ImportXMP(desc.Node): value=os.path.join(desc.Node.internalFolder, "outputSfMData.sfm"), ), ] - - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. 
- """ - if chunk.node.sfmData.value=='': - chunk.logger.warning('No sfmData, skipping') - return False - return True - - def processChunk(self, chunk): - # try: - chunk.logManager.start(chunk.node.verboseLevel.value) - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts to load data from XMP") - xmp_folder = chunk.node.xmpData.value - with open(chunk.node.sfmData.value, "r") as json_file: - sfm_data = json.load(json_file) - if xmp_folder == "": - xmp_folder = os.path.dirname(sfm_data["views"][0]["path"]) - #note: focal already in pixels - extrinsics, intrinsics, poses_ids, intrinsics_ids, images_size = import_xmp(sfm_data, xmp_folder) - sfm_data = sfm_data_from_matrices(extrinsics, intrinsics, - poses_ids, intrinsics_ids, images_size, - sfm_data=sfm_data, sensor_width = SENSOR_SIZE - ) - # Save the generated SFM data to JSON file - with open(os.path.join(chunk.node.outputSfMData.value), 'w') as f: - json.dump(sfm_data, f, indent=4) - chunk.logger.info('XMP import ends') - - # finally: - # chunk.logManager.end() - - - diff --git a/mrrs/reality_capture/env.yaml b/mrrs/reality_capture/env.yaml new file mode 100644 index 0000000..fd4d02a --- /dev/null +++ b/mrrs/reality_capture/env.yaml @@ -0,0 +1,14 @@ +name: 3drBench +channels: + - conda-forge + - open3d-admin + - defaults +dependencies: + - python #=3.9 + - numpy + - openimageio + - py-openimageio + - pip + - pip: + #- git+https://github.com/alicevision/MeshroomResearch.git + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # diff --git a/mrrs/3DR_benchmark/datasets/reality_capture.py b/mrrs/reality_capture/reality_capture.py similarity index 66% rename from mrrs/3DR_benchmark/datasets/reality_capture.py rename to mrrs/reality_capture/reality_capture.py index bdf7410..f03a86c 100644 --- a/mrrs/3DR_benchmark/datasets/reality_capture.py +++ b/mrrs/reality_capture/reality_capture.py @@ -1,11 +1,15 @@ import re -import os +import os import numpy as np +import click +import json + +from mrrs.core.ios import get_image_sizes, matrices_from_sfm_data, sfm_data_from_matrices #in RC the sensor size is set to 35mm SENSOR_SIZE = 35 -def parse_xmp(xmp_file): +def _parse_xmp(xmp_file): """ Parses the xmp from reality capture. 
""" @@ -21,19 +25,20 @@ def parse_xmp(xmp_file): "PrincipalPointV=\"([+-]?([0-9]*[.])?[0-9]+)\"", xmp_lines) focalLength_35mm = re.search( "xcr:FocalLength35mm=\"([+-]?([0-9]*[.])?[0-9]+)\"", xmp_lines) - if camera_center is None or rotation_matrix is None or principal_point_u is None or principal_point_v is None or focalLength_35mm is None: + if (camera_center is None or rotation_matrix is None or principal_point_u is None + or principal_point_v is None or focalLength_35mm is None): return None, None # DistortionCoeficients InMeshing camera_center = np.asarray( - camera_center.group(1).split(" "), dtype=np.float32) + camera_center.group(1).strip().split(" "), dtype=np.float32) rotation_matrix = np.asarray(rotation_matrix.group( - 1).split(" "), dtype=np.float32).reshape([3, 3]) + 1).strip().split(" "), dtype=np.float32).reshape([3, 3]) principal_point_u = np.asarray( - principal_point_u.group(1).split(" "), dtype=np.float32) + principal_point_u.group(1).strip().split(" "), dtype=np.float32) principal_point_v = np.asarray( - principal_point_v.group(1).split(" "), dtype=np.float32) + principal_point_v.group(1).strip().split(" "), dtype=np.float32) focalLength_35mm = np.asarray( - focalLength_35mm.group(1).split(" "), dtype=np.float32) + focalLength_35mm.group(1).strip().split(" "), dtype=np.float32) # TODO if needed, xcr:DistortionModel="brown3" xcr:Skew="0" xcr:AspectRatio="1" intrinsics = np.zeros([3, 3]) extrinsics = np.zeros([4, 4]) @@ -47,7 +52,7 @@ def parse_xmp(xmp_file): extrinsics[3, 3] = 1 return extrinsics, intrinsics -def export_reality_capture(xmp_file, extrinsics, intrinsics, pixel_size, image_size): +def _export_xmp(xmp_file, extrinsics, intrinsics, pixel_size, image_size): """ Saves the xmp for reality capture. Will convert meshroom sfm extrinsics and intrinsics converted to mrrs, into reality capture format. 
@@ -92,7 +97,7 @@ def format_array(array): with open(xmp_file, "w") as f: f.write(xmp_string) -def import_xmp(sfm_data, xmp_folder): +def _import_xmp(sfm_data, xmp_folder): """ Will import XMPs based on the path in sfmdata """ @@ -102,7 +107,7 @@ def import_xmp(sfm_data, xmp_folder): intrinsics_ids = [] images_size = [] for i, view in enumerate(sfm_data["views"]): - print("Loading xmp for view "+view["viewId"]) + print("Loading xmp for view "+view["viewId"]+" "+view["path"]) scene_image = view["path"] image_size = (int(view["width"]),int(view["height"]))#FIXME: check images_size.append(image_size) @@ -118,7 +123,7 @@ def import_xmp(sfm_data, xmp_folder): intrinsics.append(None) continue - e, i = parse_xmp(scenes_calib) + e, i = _parse_xmp(scenes_calib) if e is None: raise RuntimeError("Invalid XMP "+scenes_calib) @@ -144,4 +149,48 @@ def import_xmp(sfm_data, xmp_folder): extrinsics.append(e) intrinsics.append(i) return extrinsics, intrinsics, poses_ids, intrinsics_ids, images_size - \ No newline at end of file + +@click.group() +def rc(): + pass + +@rc.command() +@click.argument("sfmdata") +@click.argument("xmpdata") +@click.argument("outputsfmdata") +def importXMP(sfmdata, xmpdata, outputsfmdata): + xmp_folder = xmpdata + with open(sfmdata, "r") as json_file: + sfm_data = json.load(json_file) + #ifxmp folder not set, assumes it is with the images + if xmp_folder == "": + xmp_folder = os.path.dirname(sfm_data["views"][0]["path"]) + #note: focal already in pixels + extrinsics, intrinsics, poses_ids, intrinsics_ids, images_size = _import_xmp(sfm_data, xmp_folder) + sfm_data = sfm_data_from_matrices(extrinsics, intrinsics, + poses_ids, intrinsics_ids, images_size, + sfm_data=sfm_data, sensor_width = SENSOR_SIZE + ) + # Save the generated SFM data to JSON file + with open(os.path.join(outputsfmdata), 'w') as f: + json.dump(sfm_data, f, indent=4) + +@rc.command() +@click.argument("sfmdata") +@click.argument("outputfolder") +def exportXMP(sfmdata, outputfolder): + sfm_data = json.load(open(sfmdata, "r")) + (extrinsics_all_cams, intrinsics_all_cams, views_id, + poses_id, intrinsics_id, pixel_sizes_all_cams) = matrices_from_sfm_data(sfm_data) + image_sizes = get_image_sizes(sfm_data) + images_names = [os.path.basename(view["path"])[:-4] for view in sfm_data["views"]] + for image_name, extrinsics, intrinsics, pixel_size, image_size in zip(images_names, extrinsics_all_cams, + intrinsics_all_cams, pixel_sizes_all_cams, image_sizes): + if extrinsics is not None: + xmp_file = os.path.join(outputfolder, image_name+".xmp") + _export_xmp(xmp_file, extrinsics, intrinsics, pixel_size,image_size ) + + + +if __name__ == '__main__': + rc() From 029c2f3102ffb9cd0fada6cc51161598f9151091 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 17 Jun 2024 18:09:21 +0200 Subject: [PATCH 14/32] prepared depth map comparison --- mrrs/3DR_benchmark/CleanMesh.py | 50 -------- mrrs/3DR_benchmark/DepthMapComparison.py | 122 +------------------ mrrs/3DR_benchmark/depth_map_comparison.py | 133 +++++++++++++++++++++ mrrs/reality_capture/ExportXMP.py | 2 +- mrrs/reality_capture/ImportXMP.py | 2 +- 5 files changed, 141 insertions(+), 168 deletions(-) delete mode 100644 mrrs/3DR_benchmark/CleanMesh.py create mode 100644 mrrs/3DR_benchmark/depth_map_comparison.py diff --git a/mrrs/3DR_benchmark/CleanMesh.py b/mrrs/3DR_benchmark/CleanMesh.py deleted file mode 100644 index 1a70f61..0000000 --- a/mrrs/3DR_benchmark/CleanMesh.py +++ /dev/null @@ -1,50 +0,0 @@ -__version__ = "1.0" -import os -from meshroom.core import desc -from 
meshroom.core.plugin import CondaNode - -from .metrics.chamfer_distance import ENV_FILE - -class CleanMesh(CondaNode): - - #overides the env path - env_file = ENV_FILE - - category = 'MRRS - Benchmark' - commandLine = 'python "'+os.path.join(os.path.dirname(__file__),"..", "..", "metrics", "baptiste", "remove_invisible_faces.py")+'" {allParams}' - gpu = desc.Level.NONE - documentation = ''' ''' - - inputs = [ - desc.File( - name="input_mesh", - label='Input Mesh', - description='', - value='', - ), - desc.File( - name="face_index_images_folder", - label='Faces Index Images', - description='', - value='', - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='''Verbosity level (fatal, error, warning, info, debug, trace).''', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - group="" - ), - ] - - outputs = [ - desc.File( - name="output_mesh", - label="Ouput Mesh", - description="", - value=os.path.join(desc.Node.internalFolder, "cleaned_mesh.ply"), - ), - ] - diff --git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index 1301212..35df2fd 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -1,16 +1,10 @@ __version__ = "3.0" -import logging import os -import json -import numpy as np - from meshroom.core import desc +from meshroom.core.plugin import CondaNode -from mrrs.core.ios import open_depth_map, save_exr -from .metrics.metrics import compute_depth_metric - -class DepthMapComparison(desc.Node): +class DepthMapComparison(CondaNode): # size = desc.DynamicNodeSize('inputSfM') category = 'MRRS - Benchmark' @@ -21,9 +15,10 @@ class DepthMapComparison(desc.Node): Autorescale may be used otherwise but it is far from ideal. ''' + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "depth_map_comparison.py")+'" {allParams}' + envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") - inputs = [ desc.File( name='inputSfM', @@ -75,6 +70,7 @@ class DepthMapComparison(desc.Node): label='CsvName', description='Name for the csv file to be used.', value="depth_map_comparison.csv", + group="0" ), desc.ChoiceParam( @@ -94,6 +90,7 @@ class DepthMapComparison(desc.Node): description='Output folder for generated results.', value=desc.Node.internalFolder, ), + desc.File( name='outputCsv', label='Output Csv', @@ -101,110 +98,3 @@ class DepthMapComparison(desc.Node): value=lambda attr: os.path.join(desc.Node.internalFolder, attr.node.csv_name.value), ) ] - - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if not chunk.node.inputSfM.value: - chunk.logger.warning('No inputSfM in node DepthMapComparison, skipping') - return False - if not chunk.node.depthMapsFolder.value: - chunk.logger.warning('No depthMapsFolder in node DepthMapComparison, skipping') - return False - if not chunk.node.depthMapsFolderGT.value: - chunk.logger.warning('No depthMapsFolderGT in node DepthMapComparison, skipping') - return False - return True - - def parse_inputs(self, chunk): - """ - Opens the necessary files and folders. 
- """ - sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) - views_ids = [view["viewId"] for view in sfm_data["views"]] - depth_folder = chunk.node.depthMapsFolder.value - depth_files = [os.path.join(depth_folder, str(views_id)+"_depthMap.exr") for views_id in views_ids]#FIXME: hardcoded filename - #open from folder (needs to have matching file names) - depth_gt_files = [os.path.join(chunk.node.depthMapsFolderGT.value, view_id+"_depthMap.exr") - for view_id in views_ids] - - if len(depth_files) != len(depth_gt_files): - raise BaseException("Mismatching number of depth maps in source and ground truth folders (%d vs %d"%(len(depth_files), len(depth_gt_files))) - - return views_ids, depth_files, depth_gt_files, [view["path"] for view in sfm_data["views"]] - - - def processChunk(self, chunk): - """ - Computes the different metrics on the inputSfM and groud truth depth maps. - """ - - chunk.logManager.start(chunk.node.verboseLevel.value) - #open inputs - if not self.check_inputs(chunk): - return - views_ids, depth_files, depth_gt_files, view_path = self.parse_inputs(chunk) - chunk.logger.info('Computing metrics for %d depths maps'%len(depth_files)) - metrics = chunk.node.metrics.value - auto_rescale = chunk.node.autoRescale.value - mask_value=chunk.node.maskValue.value - if (mask_value == "") or (mask_value is None): - mask_value = None - else: - mask_value = float(mask_value) - chunk.logger.info('Will ignore depth values <%f'%mask_value) - - #compute metrics - computed_metric_values = [] - for index, (view_id, depth_file, depth_gt_file, vp) in enumerate(zip(views_ids, depth_files, depth_gt_files, view_path)): - chunk.logger.info('Computing metrics for depth maps %d/%d: %s and %s'%(index, len(depth_files), depth_file, depth_gt_file)) - - if not os.path.exists(depth_file): - logging.warning("Depth map for view "+view_id+" was not computed (likely because the SfM was not able to compute a pose)") - computed_metric_values.append([np.nan for _ in metrics]) - continue - depth_map = open_depth_map(depth_file) - depth_map_gt = open_depth_map(depth_gt_file) - - #metric computation - metric_values = [] - for metric in metrics: - metric_value, metric_per_pixel, processed_depth_map_gt = compute_depth_metric(depth_map, depth_map_gt, metric, - auto_resize=True, auto_rescale=auto_rescale, mask_value=mask_value)#FIXME: need to make sure the depth is at the same scale - metric_values.append(metric_value) - #usefull display - if metric_per_pixel is not None: - save_exr(metric_per_pixel, os.path.join(chunk.node.output.value, view_id+"_distance_"+metric+"_depthMap.exr")) - computed_metric_values.append(metric_values) - - #stack up and compute average on dataset - computed_metric_values = np.asarray(computed_metric_values) - average_metric_values = np.mean(computed_metric_values, axis=0) - median_metric_values = np.median(computed_metric_values, axis=0) - #write output file - os.makedirs(chunk.node.output.value, exist_ok=True) - with open(chunk.node.outputCsv.value, "w") as csv_file: - #header - csv_file.write("View,") - for metric in metrics: - csv_file.write(metric+",") - csv_file.write("\n") - #values - for view_id, metric_values in zip(views_ids, computed_metric_values): - csv_file.write(view_id+",") - for metric_value in metric_values: - csv_file.write("%f,"%metric_value) - csv_file.write("\n") - #average and median value - csv_file.write("average,") - for average_metric_value in average_metric_values: - csv_file.write("%f,"%average_metric_value) - csv_file.write("\n") - csv_file.write("median,") - for 
median_metric_value in median_metric_values: - csv_file.write("%f,"%median_metric_value) - - chunk.logger.info('Depth map comparison end') - - diff --git a/mrrs/3DR_benchmark/depth_map_comparison.py b/mrrs/3DR_benchmark/depth_map_comparison.py new file mode 100644 index 0000000..5843208 --- /dev/null +++ b/mrrs/3DR_benchmark/depth_map_comparison.py @@ -0,0 +1,133 @@ +import json +import logging +import argparse +import os + +from mrrs.core.ios import open_depth_map, save_exr +from metrics.metrics import compute_depth_metric + +import numpy as np + +parser = argparse.ArgumentParser() +parser.add_argument( + '-i', '--inputSfM', + help="", +) +parser.add_argument( + '-f', '--depthMapsFolder', + help="", +) +parser.add_argument( + '-g', '--depthMapsFolderGT', + help="", +) +parser.add_argument( + '-m', '--metrics', + help="", +) + +parser.add_argument( + '-r', '--autoRescale', + help="", +) + +parser.add_argument( + '-k', '--maskValue', + help="", +) + +parser.add_argument( + '-o', '--outputCsv', + help="", +) + +parser.add_argument( + '-v', '--verboseLevel', + help="Verbose level", +) + +args = parser.parse_args() + +input_sfm=args.inputSfM +depth_folder = args.depthMapsFolder +depth_folder_gt = args.depthMapsFolderGT +metrics = args.metrics.split(",") +outputCsv = args.outputCsv +auto_rescale = bool(args.autoRescale) +mask_value = args.maskValue + +sfm_data=json.load(open(input_sfm,"r")) +views_ids = [view["viewId"] for view in sfm_data["views"]] + +depth_files = [os.path.join(depth_folder, str(views_id)+"_depthMap.exr") for views_id in views_ids]#FIXME: hardcoded filename +#open from folder (needs to have matching file names) +depth_gt_files = [os.path.join(depth_folder_gt, view_id+"_depthMap.exr") + for view_id in views_ids] + +if len(depth_files) != len(depth_gt_files): + raise BaseException("Mismatching number of depth maps in source and ground truth folders (%d vs %d"%(len(depth_files), len(depth_gt_files))) + +view_path = [view["path"] for view in sfm_data["views"]] + +print('Computing metrics for %d depths maps'%len(depth_files)) + + +if (mask_value == "") or (mask_value is None): + mask_value = None +else: + mask_value = float(mask_value) + print('Will ignore depth values <%f'%mask_value) + +#compute metrics +computed_metric_values = [] +for index, (view_id, depth_file, depth_gt_file, vp) in enumerate(zip(views_ids, depth_files, depth_gt_files, view_path)): + print('Computing metrics for depth maps %d/%d: %s and %s'%(index, len(depth_files), depth_file, depth_gt_file)) + + if not os.path.exists(depth_file): + logging.warning("Depth map for view "+view_id+" was not computed (likely because the SfM was not able to compute a pose)") + computed_metric_values.append([np.nan for _ in metrics]) + continue + depth_map = open_depth_map(depth_file) + depth_map_gt = open_depth_map(depth_gt_file) + + #metric computation + metric_values = [] + for metric in metrics: + metric_value, metric_per_pixel, processed_depth_map_gt = compute_depth_metric(depth_map, depth_map_gt, metric, + auto_resize=True, auto_rescale=auto_rescale, mask_value=mask_value)#FIXME: need to make sure the depth is at the same scale + metric_values.append(metric_value) + #usefull display + if metric_per_pixel is not None: + save_exr(metric_per_pixel, os.path.join(os.path.dirname(outputCsv), view_id+"_distance_"+metric+"_depthMap.exr")) + computed_metric_values.append(metric_values) + +#stack up and compute average on dataset +computed_metric_values = np.asarray(computed_metric_values) +average_metric_values = 
np.mean(computed_metric_values, axis=0) +median_metric_values = np.median(computed_metric_values, axis=0) +#write output file +os.makedirs(os.path.dirname(outputCsv), exist_ok=True) +with open(outputCsv, "w") as csv_file: + #header + csv_file.write("View,") + for metric in metrics: + csv_file.write(metric+",") + csv_file.write("\n") + #values + for view_id, metric_values in zip(views_ids, computed_metric_values): + csv_file.write(view_id+",") + for metric_value in metric_values: + csv_file.write("%f,"%metric_value) + csv_file.write("\n") + #average and median value + csv_file.write("average,") + for average_metric_value in average_metric_values: + csv_file.write("%f,"%average_metric_value) + csv_file.write("\n") + csv_file.write("median,") + for median_metric_value in median_metric_values: + csv_file.write("%f,"%median_metric_value) + +print('Depth map comparison end') + + diff --git a/mrrs/reality_capture/ExportXMP.py b/mrrs/reality_capture/ExportXMP.py index 3c6760c..9c5e1ef 100644 --- a/mrrs/reality_capture/ExportXMP.py +++ b/mrrs/reality_capture/ExportXMP.py @@ -10,7 +10,7 @@ class ExportXMP(CondaNode): - category = 'Meshroom Research' + category = 'MRRS - Reality Capture' documentation = '''Node to create an XMP file from camera calibration.''' diff --git a/mrrs/reality_capture/ImportXMP.py b/mrrs/reality_capture/ImportXMP.py index b6e7f22..7f18438 100644 --- a/mrrs/reality_capture/ImportXMP.py +++ b/mrrs/reality_capture/ImportXMP.py @@ -10,7 +10,7 @@ class ImportXMP(CondaNode): - category = 'Meshroom Research' + category = 'MRRS - Reality Capture' documentation = '''Node to import a camera calibration from an XMP''' From 38f5615801c9af414144a2e5b6f56b005a20b8bf Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 11:28:18 +0200 Subject: [PATCH 15/32] ported load dataset --- mrrs/3DR_benchmark/LoadDataset.py | 231 +++------------------------ mrrs/3DR_benchmark/load_dataset.py | 246 +++++++++++++++++++++++++++++ 2 files changed, 267 insertions(+), 210 deletions(-) create mode 100644 mrrs/3DR_benchmark/load_dataset.py diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index f2c7598..8feec45 100644 --- a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -1,33 +1,25 @@ __version__ = "3.0" import os -import json -import numpy as np from meshroom.core import desc -import trimesh +from meshroom.core.plugin import CondaNode -from mrrs.core.geometry import camera_projection, random_sample_points_mesh_2, transform_cg_cv -from mrrs.core.ios import open_depth_map, open_image, save_exr, save_image, sfm_data_from_matrices - -from .datasets import load_dataset - -#FIXME:move this into a command line node? -# Pros: can install mrrs via condanode -# Cons: no debugging for us, or just switch Node to CondaNode? quid processchunk? 
-class LoadDataset(desc.Node): +class LoadDataset(CondaNode): category = 'MRRS - Benchmark' documentation = '''Util node to open datasets with different data from the images in the .sfm''' envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "load_dataset.py")+'" {allParams}' + inputs = [ desc.File( - name="sfmData", - label="sfmData", - description="Input SfMData.", + name="inputSfM", + label="inputSfM", + description="Input sfmData", value="", ), @@ -64,6 +56,7 @@ class LoadDataset(CondaNode): description='''Will display point cloud or landmarks projection.''', value=False, advanced=True ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', description='''Verbosity level (fatal, error, warning, info, debug, trace).''', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, advanced=True ), ] outputs = [ @@ -118,7 +114,8 @@ class LoadDataset(CondaNode): value=os.path.join(desc.Node.internalFolder, 'depth_maps', '_depthMap.exr'), advanced=True, - visible=False + visible=False, + group="" ), desc.File( @@ -129,19 +126,24 @@ class LoadDataset(CondaNode): value=os.path.join(desc.Node.internalFolder, 'masks', '.png'), advanced=True, - visible=False + visible=False, + group="" ), desc.File( name='landMarksProjDisplay', label='landMarksProjDisplay', - description='Generated images for landmarl projection.', + description='Generated images for landmark projection', semantic='image', value=os.path.join(desc.Node.internalFolder, 'lm_projs', '.png'), advanced=True, enabled=lambda attr: attr.node.landMarksProj.value, - visible=False + visible=False, ), desc.File( @@ -156,195 +158,4 @@ class LoadDataset(CondaNode): ), ] - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if chunk.node.sfmData.value == '': - chunk.logger.warning( - 'No input InputFolder or sfmData, skipping') - return False - return True - - def processChunk(self, chunk): - """ - Opens the dataset data. - """ - - chunk.logManager.start(chunk.node.verboseLevel.value) - if not self.check_inputs(chunk): - return False - - print("*LoadDataset Starting") - - print("**Importing data") - # Load SFM data from JSON file - sfm_data = json.load(open(chunk.node.sfmData.value, "r")) - #load datset data (may update sfm_data!) 
- gt_data, sfm_data = load_dataset(sfm_data, chunk.node.datasetType.value) - - # Load meshroom ids - extrinsics_id = [v["poseId"] for v in sfm_data["views"]] - instrinsics_id = [v["intrinsicId"] for v in sfm_data["views"]] - views_id = [v["viewId"] for v in sfm_data["views"]] - - print("**Exporting data") - #Generate SFM data from matrices - if not (len(gt_data["intrinsics"]) == len(gt_data["extrinsics"]) == len(gt_data["image_sizes"])): - raise RuntimeError("Mismatching number of parameters for the sfmData ") - #note: will copy sfm_data - gt_sfm_data = sfm_data_from_matrices(gt_data["extrinsics"], gt_data["intrinsics"], extrinsics_id, instrinsics_id, - gt_data["image_sizes"], sfm_data, sensor_width=gt_data["sensor_size"]) - - #Add dummy resection id (for display) - for i, v in enumerate(gt_sfm_data["views"]): - gt_sfm_data["views"][i]["resectionId"]=str(i) - - #Exports - if chunk.node.initSfmLandmarksVertices.value != 0: - print("**Initialising random SfM landmarks from geometry") - if "structure" in gt_sfm_data: - raise RuntimeError("Landmarks already in sfmData") - if gt_data["mesh"] is None: - raise RuntimeError("Cannot initialise landmarks with no geometry") - - vertices_lm = gt_data["mesh"].vertices.copy() - #meshes in meshroom are in the CG cs, landmarks are CV - vertices_lm=transform_cg_cv(vertices_lm) - - #sampling from mesh or or point cloud - if isinstance(gt_data["mesh"], trimesh.PointCloud) : - vertices_indxs = np.random.choice(list(range(vertices_lm.shape[0])), chunk.node.initSfmLandmarksVertices.value) - vertices_lm = vertices_lm[vertices_indxs] - else: - vertices_lm = random_sample_points_mesh_2([vertices_lm, gt_data["mesh"].faces], - chunk.node.initSfmLandmarksVertices.value) - #compute projections - vertices_projections = [camera_projection(vertices_lm, gt_data["extrinsics"][oi], gt_data["intrinsics"][oi]) for oi in range(len(views_id))] - - if chunk.node.landMarksProj.value: - print("**Exporting %d SfM landmarks projections"%(vertices_lm.shape[0])) - os.makedirs(os.path.dirname(chunk.node.landMarksProjDisplay.value), exist_ok=True) - size_lm=int(np.ceil(gt_data["image_sizes"][0][0]/800)) - lm_color = np.random.random_integers(low=0, high=255, size=[vertices_lm.shape[0], 3]) - for projs, view in zip(vertices_projections, gt_sfm_data["views"]): - prj_img = open_image(view["path"], to_srgb=True) - for i, (x,y) in enumerate(projs[0]): - x=int(x) - y=int(y) - if x-size_lm<0 or y-size_lm<0 or x+size_lm >= gt_data["image_sizes"][0][0] or y+size_lm >= gt_data["image_sizes"][0][1]: - continue - prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,0] = lm_color[i,0] - prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,1] = lm_color[i,1] - prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,2] = lm_color[i,2] - output_image = os.path.join(os.path.dirname(chunk.node.landMarksProjDisplay.value), view["viewId"]+".png") - save_image(output_image, prj_img) - - print("**Exporting %d SfM landmarks"%(vertices_lm.shape[0])) - structure = [] - for vi, v in enumerate(vertices_lm):#FIXME: slow - landmark = {} - landmark["landmarkId"] = str(vi) - landmark["descType"] = "unknown" - landmark["color"] = ["255", "0", "0"] - landmark["X"] = [str(x) for x in v] - landmark["observations"] = [] - #create dummy obs in all views - for oi, i in enumerate(views_id): - #sanity check, the landmark is visible in the view - x,y=vertices_projections[oi][0][vi] - if x<0 or y<0 or x>gt_data["image_sizes"][0][0] or y > gt_data["image_sizes"][0][1]: - continue - obs = {"observationId": str(i), - "featureId": str(oi), - "x": 
[str(x),str(y)]} - landmark["observations"].append(obs) - structure.append(landmark) - gt_sfm_data["structure"] = structure - - # Save the generated SFM data to JSON file - print("**Writting sfm") - with open(os.path.join(chunk.node.outputSfMData.value), 'w') as f: - json.dump(gt_sfm_data, f, indent=4) - - # Save depth maps if any - if "depth_maps" in gt_data: - print("**Writting depth maps") - os.makedirs(chunk.node.depthMapsFolder.value, exist_ok=True) - for view_id, depth_map, gt_extrinsic, gt_intrinsic in \ - zip(views_id, gt_data["depth_maps"], gt_data["extrinsics"], gt_data["intrinsics"]): - if os.path.exists(depth_map): - depth_map_gt = open_depth_map(depth_map) - else: - continue - #FIXME: move to IO? - #add flags to the depth map for display - camera_center = gt_extrinsic[0:3, 3] - inverse_intr_rot = np.linalg.inv( - gt_intrinsic @ np.linalg.inv(gt_extrinsic[0:3, 0:3])) - #https://openimageio.readthedocs.io/en/v2.4.6.1/imageoutput.html - depth_meta = { - "AliceVision:CArr": camera_center, - "AliceVision:iCamArr": inverse_intr_rot, - "AliceVision:downscale": 1 - } - save_exr(depth_map_gt, os.path.join(chunk.node.depthMapsFolder.value, - str(view_id) + "_depthMap.exr"), custom_header=depth_meta) - - if "masks" not in gt_data and chunk.node.initMasks.value : - from concurrent.futures import ThreadPoolExecutor - from threading import Thread - #try to see if image has alpha - image = open_image(gt_sfm_data["views"][0]["path"]) - if image.shape[-1] == 4: - print("**Init masks from images") - #note: process is io bound - def open_mask(view): - return 255*(open_image(view["path"])[:,:,3]>0) - #FIXME: this blocks main thread - with ThreadPoolExecutor() as threadpool:#auto max worker - gt_data["masks"]=[r for r in threadpool.map(open_mask, gt_sfm_data["views"])] - - print("**Done init masks from images") - #else try to see if image has depth maps - elif "depth_maps" in gt_data: - print("**Init masks from depth maps") - def open_mask(view): - return 255*(open_depth_map(depth_map)>0) - #FIXME: this blocks main thread - with ThreadPoolExecutor() as threadpool:#auto max worker - gt_data["masks"]=[r for r in threadpool.map(open_mask, gt_sfm_data["views"])] - else: - raise RuntimeError("Could not initialise masks from image or depth maps") - - #Save image masks if any - if "masks" in gt_data: - print("**Writting masks") - os.makedirs(chunk.node.maskFolder.value, exist_ok=True) - for mask, view_id in zip(gt_data["masks"], views_id) : - #if we have a list of image, open them - if isinstance(mask, str): - mask=open_image(mask) - save_image(os.path.join(chunk.node.maskFolder.value, str(view_id) + ".png"), mask) - - - #Save ground truth mesh as obj if any - if "mesh" in gt_data : - print("**Writting mesh") - gt_data["mesh"].export(chunk.node.mesh.value) - - #create ply if the mesh is a point cloud (poitn cloud display not supported...) 
- if isinstance(gt_data["mesh"], trimesh.PointCloud) or len(gt_data["mesh"].faces) == 0: - print("***Writting point cloud preview") - - #We have a special viewer for point cloud in ply - new_display_filename = chunk.node.meshDisplay.value.split(".")[0]+".pc.ply" - gt_data["mesh"].export(new_display_filename) - chunk.node.meshDisplay.value=new_display_filename - - - else: - gt_data["mesh"].export(chunk.node.meshDisplay.value) - - print("*LoadDataset ends") - + diff --git a/mrrs/3DR_benchmark/load_dataset.py b/mrrs/3DR_benchmark/load_dataset.py new file mode 100644 index 0000000..63772ce --- /dev/null +++ b/mrrs/3DR_benchmark/load_dataset.py @@ -0,0 +1,246 @@ +import json +import argparse +import os + +import trimesh +import numpy as np + +from datasets import load_dataset +from mrrs.core.geometry import camera_projection, random_sample_points_mesh_2, transform_cg_cv +from mrrs.core.ios import open_depth_map, open_image, save_exr, save_image, sfm_data_from_matrices + + +parser = argparse.ArgumentParser() +parser.add_argument( + '--inputSfM', + help="", +) +parser.add_argument( + '--datasetType', + help="", +) + +parser.add_argument( + '--initSfmLandmarksVertices', + help="", +) +parser.add_argument( + '--landMarksProj', + help="", +) +parser.add_argument( + '--outputSfMData', + help="", +) +parser.add_argument( + '--depthMapsFolder', + help="", +) +parser.add_argument( + '--initMasks', + help="", +) +parser.add_argument( + '--maskFolder', + help="", +) +parser.add_argument( + '--mesh', + help="", +) +parser.add_argument( + '--meshDisplay', + help="", +) +parser.add_argument( + '--landMarksProjDisplay', + help="", +) +args = parser.parse_args() + +sfm_data=args.inputSfM +dataset_type = args.datasetType +init_sfm_lm_vertices = int(args.initSfmLandmarksVertices) +lm_proj = args.landMarksProj +outputSfMData = args.outputSfMData +depthMapsFolder = args.depthMapsFolder +initMasks = bool(args.initMasks) +maskFolder=args.maskFolder +mesh = args.mesh +meshDisplay = args.meshDisplay +lm_proj_display=args.landMarksProjDisplay + + +print("*LoadDataset Starting") + +print("**Importing data") +# Load SFM data from JSON file +sfm_data = json.load(open(sfm_data, "r")) +#load datset data (may update sfm_data!) 
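
One caveat in the argument handling above: every value arrives as a string, and bool() on a non-empty string is always True, so initMasks (and the later `if lm_proj:` test) stay truthy even if the node were to pass "False" or "0". A stricter conversion could look like the sketch below; only the flag names defined by the parser above are assumed:

    def str2bool(value):
        """Interpret the usual textual spellings of a boolean command-line flag."""
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ("1", "true", "yes", "on")

    initMasks = str2bool(args.initMasks)
    lm_proj = str2bool(args.landMarksProj)
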
+gt_data, sfm_data = load_dataset(sfm_data, dataset_type) + +# Load meshroom ids +extrinsics_id = [v["poseId"] for v in sfm_data["views"]] +instrinsics_id = [v["intrinsicId"] for v in sfm_data["views"]] +views_id = [v["viewId"] for v in sfm_data["views"]] + +print("**Exporting data") +#Generate SFM data from matrices +if not (len(gt_data["intrinsics"]) == len(gt_data["extrinsics"]) == len(gt_data["image_sizes"])): + raise RuntimeError("Mismatching number of parameters for the sfmData ") +#note: will copy sfm_data +gt_sfm_data = sfm_data_from_matrices(gt_data["extrinsics"], gt_data["intrinsics"], extrinsics_id, instrinsics_id, + gt_data["image_sizes"], sfm_data, sensor_width=gt_data["sensor_size"]) + +#Add dummy resection id (for display) +for i, v in enumerate(gt_sfm_data["views"]): + gt_sfm_data["views"][i]["resectionId"]=str(i) + +#Exports +if init_sfm_lm_vertices != 0: + print("**Initialising random SfM landmarks from geometry") + if "structure" in gt_sfm_data: + raise RuntimeError("Landmarks already in sfmData") + if gt_data["mesh"] is None: + raise RuntimeError("Cannot initialise landmarks with no geometry") + + vertices_lm = gt_data["mesh"].vertices.copy() + #meshes in meshroom are in the CG cs, landmarks are CV + vertices_lm=transform_cg_cv(vertices_lm) + + #sampling from mesh or or point cloud + if isinstance(gt_data["mesh"], trimesh.PointCloud) : + vertices_indxs = np.random.choice(list(range(vertices_lm.shape[0])), init_sfm_lm_vertices) + vertices_lm = vertices_lm[vertices_indxs] + else: + vertices_lm = random_sample_points_mesh_2([vertices_lm, gt_data["mesh"].faces], + init_sfm_lm_vertices) + #compute projections + vertices_projections = [camera_projection(vertices_lm, gt_data["extrinsics"][oi], gt_data["intrinsics"][oi]) for oi in range(len(views_id))] + + if lm_proj: + print("**Exporting %d SfM landmarks projections"%(vertices_lm.shape[0])) + os.makedirs(os.path.dirname(lm_proj_display), exist_ok=True) + size_lm=int(np.ceil(gt_data["image_sizes"][0][0]/800)) + lm_color = np.random.random_integers(low=0, high=255, size=[vertices_lm.shape[0], 3]) + for projs, view in zip(vertices_projections, gt_sfm_data["views"]): + prj_img = open_image(view["path"], to_srgb=True) + for i, (x,y) in enumerate(projs[0]): + x=int(x) + y=int(y) + if x-size_lm<0 or y-size_lm<0 or x+size_lm >= gt_data["image_sizes"][0][0] or y+size_lm >= gt_data["image_sizes"][0][1]: + continue + prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,0] = lm_color[i,0] + prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,1] = lm_color[i,1] + prj_img[y-size_lm:y+size_lm,x-size_lm:x+size_lm,2] = lm_color[i,2] + output_image = os.path.join(os.path.dirname(lm_proj_display), view["viewId"]+".png") + save_image(output_image, prj_img) + + print("**Exporting %d SfM landmarks"%(vertices_lm.shape[0])) + structure = [] + for vi, v in enumerate(vertices_lm):#FIXME: slow + landmark = {} + landmark["landmarkId"] = str(vi) + landmark["descType"] = "unknown" + landmark["color"] = ["255", "0", "0"] + landmark["X"] = [str(x) for x in v] + landmark["observations"] = [] + #create dummy obs in all views + for oi, i in enumerate(views_id): + #sanity check, the landmark is visible in the view + x,y=vertices_projections[oi][0][vi] + if x<0 or y<0 or x>gt_data["image_sizes"][0][0] or y > gt_data["image_sizes"][0][1]: + continue + obs = {"observationId": str(i), + "featureId": str(oi), + "x": [str(x),str(y)]} + landmark["observations"].append(obs) + structure.append(landmark) + gt_sfm_data["structure"] = structure + +# Save the generated SFM data to 
JSON file +print("**Writting sfm") +with open(os.path.join(outputSfMData), 'w') as f: + json.dump(gt_sfm_data, f, indent=4) + +# Save depth maps if any +if "depth_maps" in gt_data: + print("**Writting depth maps") + os.makedirs(depthMapsFolder, exist_ok=True) + for view_id, depth_map, gt_extrinsic, gt_intrinsic in \ + zip(views_id, gt_data["depth_maps"], gt_data["extrinsics"], gt_data["intrinsics"]): + if os.path.exists(depth_map): + depth_map_gt = open_depth_map(depth_map) + else: + continue + #FIXME: move to IO? + #add flags to the depth map for display + camera_center = gt_extrinsic[0:3, 3] + inverse_intr_rot = np.linalg.inv( + gt_intrinsic @ np.linalg.inv(gt_extrinsic[0:3, 0:3])) + #https://openimageio.readthedocs.io/en/v2.4.6.1/imageoutput.html + depth_meta = { + "AliceVision:CArr": camera_center, + "AliceVision:iCamArr": inverse_intr_rot, + "AliceVision:downscale": 1 + } + save_exr(depth_map_gt, os.path.join(depthMapsFolder, + str(view_id) + "_depthMap.exr"), custom_header=depth_meta) + +if "masks" not in gt_data and initMasks : + from concurrent.futures import ThreadPoolExecutor + from threading import Thread + #try to see if image has alpha + image = open_image(gt_sfm_data["views"][0]["path"]) + if image.shape[-1] == 4: + print("**Init masks from images") + #note: process is io bound + def open_mask(view): + return 255*(open_image(view["path"])[:,:,3]>0) + #FIXME: this blocks main thread + with ThreadPoolExecutor() as threadpool:#auto max worker + gt_data["masks"]=[r for r in threadpool.map(open_mask, gt_sfm_data["views"])] + + print("**Done init masks from images") + #else try to see if image has depth maps + elif "depth_maps" in gt_data: + print("**Init masks from depth maps") + def open_mask(view): + return 255*(open_depth_map(depth_map)>0) + #FIXME: this blocks main thread + with ThreadPoolExecutor() as threadpool:#auto max worker + gt_data["masks"]=[r for r in threadpool.map(open_mask, gt_sfm_data["views"])] + else: + raise RuntimeError("Could not initialise masks from image or depth maps") + +#Save image masks if any +if "masks" in gt_data: + print("**Writting masks") + os.makedirs(maskFolder, exist_ok=True) + for mask, view_id in zip(gt_data["masks"], views_id) : + #if we have a list of image, open them + if isinstance(mask, str): + mask=open_image(mask) + save_image(os.path.join(maskFolder, str(view_id) + ".png"), mask) + + +#Save ground truth mesh as obj if any +if "mesh" in gt_data : + print("**Writting mesh") + gt_data["mesh"].export(mesh) + + #create ply if the mesh is a point cloud (poitn cloud display not supported...) 
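
A note on the branch that follows: in this standalone script meshDisplay is a plain path string coming from argparse, so the two `.value` accesses carried over from the node version would raise AttributeError, and reassigning the local string cannot update the node's meshDisplay output in any case. A corrected sketch of that branch under those assumptions:

    # meshDisplay is a plain string here, not a node attribute
    if isinstance(gt_data["mesh"], trimesh.PointCloud) or len(gt_data["mesh"].faces) == 0:
        print("***Writing point cloud preview")
        # point clouds get a dedicated .pc.ply file for the point-cloud viewer
        preview_filename = os.path.splitext(meshDisplay)[0] + ".pc.ply"
        gt_data["mesh"].export(preview_filename)
    else:
        gt_data["mesh"].export(meshDisplay)

Since the script cannot rewrite the node's output attribute, the .pc.ply naming convention has to be agreed on the node side for the preview to be picked up.
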
+ if isinstance(gt_data["mesh"], trimesh.PointCloud) or len(gt_data["mesh"].faces) == 0: + print("***Writting point cloud preview") + + #We have a special viewer for point cloud in ply + new_display_filename = meshDisplay.value.split(".")[0]+".pc.ply" + gt_data["mesh"].export(new_display_filename) + meshDisplay.value=new_display_filename + + + else: + gt_data["mesh"].export(meshDisplay) + +print("*LoadDataset ends") + From 9c53d646c0f501308a898f8dd5c00f3db6538408 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 11:49:20 +0200 Subject: [PATCH 16/32] ported meshcomparison --- mrrs/3DR_benchmark/MeshComparison.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/mrrs/3DR_benchmark/MeshComparison.py b/mrrs/3DR_benchmark/MeshComparison.py index 1b71cf7..73439cb 100644 --- a/mrrs/3DR_benchmark/MeshComparison.py +++ b/mrrs/3DR_benchmark/MeshComparison.py @@ -5,18 +5,16 @@ from .metrics.chamfer_distance import ENV_FILE -class MeshcomparisonBaptiste(CondaNode): +class MeshComparison(CondaNode): - #overides the env path - @property - def env_file(self): - return ENV_FILE - - commandLine = 'python "'+os.path.join(os.path.dirname(__file__),"..", "..", "metrics", "chamfer_distance", "eval_pcd.py")+'" {allParams}' gpu = desc.Level.NONE category = 'MRRS - Benchmark' - documentation = '''Calls the dtu benchmark metrics between two meshes''' + documentation = '''Computes the champfer distance between two meshes''' + + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "metrics", "chamfer_distance", "eval_pcd.py")+'" {allParams}' + + envFile=ENV_FILE inputs = [ desc.File( From c838f2152abedce5709f36e83b89cbb12e0c6e12 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 11:49:35 +0200 Subject: [PATCH 17/32] cleanup colmap and blender --- mrrs/3DR_benchmark/CalibrationComparison.py | 5 - mrrs/3DR_benchmark/LoadDataset.py | 8 - mrrs/blender/CreateTrackingMarkers.py | 339 ------------------ mrrs/blender/Render360.py | 51 --- mrrs/blender/RenderMesh.py | 2 +- mrrs/blender/RenderOverlay.py | 55 --- mrrs/blender/{ => scripts}/alembic_convert.py | 0 .../exctract_ground_truth_ptut.py | 0 .../{ => scripts}/extract_ground_truth.py | 0 mrrs/blender/{ => scripts}/render_mesh.py | 0 mrrs/colmap/AutomaticReconstructor.py | 2 - mrrs/colmap/__init__.py | 5 + 12 files changed, 6 insertions(+), 461 deletions(-) delete mode 100644 mrrs/blender/CreateTrackingMarkers.py delete mode 100644 mrrs/blender/Render360.py delete mode 100644 mrrs/blender/RenderOverlay.py rename mrrs/blender/{ => scripts}/alembic_convert.py (100%) rename mrrs/blender/{ => scripts}/exctract_ground_truth_ptut.py (100%) rename mrrs/blender/{ => scripts}/extract_ground_truth.py (100%) rename mrrs/blender/{ => scripts}/render_mesh.py (100%) diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index f8a75d0..387b717 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -72,12 +72,7 @@ class CalibrationComparison(CondaNode): name='outputCsv', label='Output Csv', description='Output file to generated results.', -<<<<<<< HEAD value=lambda attr: os.path.join(desc.Node.internalFolder, attr.node.csv_name.value), -======= - value=lambda attr: os.path.join(desc.Node.internalFolder, attr.node.csvName.value), - uid=[], ->>>>>>> calibration comparison cli ) ] diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index 8feec45..093d7c2 100644 --- 
a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -56,7 +56,6 @@ class LoadDataset(CondaNode): description='''Will display point cloud or landmarks projection.''', value=False, advanced=True -<<<<<<< HEAD ), desc.ChoiceParam( @@ -68,9 +67,6 @@ class LoadDataset(CondaNode): exclusive=True, advanced=True ), -======= - ) ->>>>>>> ported load dataset ] outputs = [ @@ -133,11 +129,7 @@ class LoadDataset(CondaNode): desc.File( name='landMarksProjDisplay', label='landMarksProjDisplay', -<<<<<<< HEAD description='Generated images for landmarl projection.', -======= - description='Generated images for landmark projection', ->>>>>>> ported load dataset semantic='image', value=os.path.join(desc.Node.internalFolder, 'lm_projs', '.png'), diff --git a/mrrs/blender/CreateTrackingMarkers.py b/mrrs/blender/CreateTrackingMarkers.py deleted file mode 100644 index a24446f..0000000 --- a/mrrs/blender/CreateTrackingMarkers.py +++ /dev/null @@ -1,339 +0,0 @@ -""" -This node creates 3d markers from a given set of 3d landmarks. -Usefull to test cam track. -To be paired with an sfm transform that "straigthen" everything. -""" -__version__ = "3.0" - -import json -import os - -from meshroom.core import desc -from meshroom.core.node import ExecMode -from mrrs.core.geometry import * -from mrrs.core.ios import * -import trimesh - -def filter_landmarks_per_tile(landmarks, nb_voxels, nb_landmarks_per_voxels, min_landmark_per_voxel): - """ - Will filter out landmarks such that we only keep the first nb_landmarks_per_voxels landmark per voxel. - Assumes the landmarks are sorted with first landmarks to keep. - """ - sfm_range = (np.amin(landmarks, axis=0), np.amax(landmarks, axis=0)) - sfm_step = (sfm_range[1]-sfm_range[0])/nb_voxels - final_landmarks_list = [] - for voxel_x in np.arange(sfm_range[0][0], sfm_range[1][0], sfm_step[0]): - for voxel_y in np.arange(sfm_range[0][1], sfm_range[1][1], sfm_step[1]): - for voxel_z in np.arange(sfm_range[0][2], sfm_range[1][2], sfm_step[2]): - print("Filtering for voxel %f-%f %f-%f %f-%f"%(voxel_x, voxel_x+sfm_step[0], - voxel_y, voxel_y+sfm_step[1], - voxel_z, voxel_z+sfm_step[2])) - landmarks_inside = landmarks[ (voxel_x<=landmarks[:, 0])&(landmarks[:, 0] min_landmark_per_voxel: - final_landmarks_list += list(landmarks_inside[:min(nb_landmarks_per_voxels, len(landmarks_inside))]) - return final_landmarks_list - -def get_landmarks_from_sfm_data(sfm_data, sort_mode): - """ - Get landmarks (sorted by track length) - """ - if "structure" not in sfm_data.keys(): - return [], [] - landmarks = [] - landmarks_color = [] - landmarks_track_length = [] - landmarks_track_mean_scale = [] - for landmark in sfm_data["structure"]: - landmarks_track_length.append(len(landmark["observations"])) - landmarks_track_mean_scale.append(np.mean([float(l["scale"]) for l in landmark["observations"]], axis=0)) - landmarks.append(landmark["X"]) - landmarks_color.append(landmark["color"]) - landmarks_track_length = np.asarray(landmarks_track_length, dtype=np.float32) - landmarks = np.asarray(landmarks, dtype=np.float32) - landmarks_track_mean_scale = np.asarray(landmarks_track_mean_scale, dtype=np.float32) - landmarks_color = np.asarray(landmarks, dtype=np.uint8) - - if sort_mode == "longest": - order = landmarks_track_length.argsort() - elif sort_mode == "scale": - mean_scale = np.asarray(landmarks_track_mean_scale) - order = mean_scale.argsort() - else: - raise RuntimeError("Unrecognised sort mode") - - landmarks_sorted = landmarks[order] - landmarks_color = landmarks_color[order] 
- return landmarks_sorted, landmarks_color - -def display_track_obj(obj_type, landmarks, landmarks_color, landmarks_per_voxel, scene_tiles, min_landmark_per_voxel): - """ - Will return point coordinates corresponding to the longest landmarks. - Also make sure the points are uniformly distributed, in the scene: - will only display n points per voxels. - """ - landmarks = filter_landmarks_per_tile(landmarks, scene_tiles, landmarks_per_voxel, min_landmark_per_voxel) - objs = [] - for landmark_index, landmark in enumerate(landmarks): - obj = {"type": obj_type, - "name": "landmark_"+str(landmark_index), - "coordinates": landmark.tolist(), - "color": landmarks_color[landmark_index].tolist()} - objs.append(obj) - return objs - -def display_track_cones(landmarks, landmarks_color, landmarks_per_voxel=1, scene_tiles=3, min_landmark_per_voxel=0): - return display_track_obj("cone", landmarks, landmarks_color, landmarks_per_voxel, scene_tiles, min_landmark_per_voxel) - -def display_track_spheres(landmarks, landmarks_color, landmarks_per_voxel=1, scene_tiles=3, min_landmark_per_voxel=0): - return display_track_obj("sphere", landmarks, landmarks_color, landmarks_per_voxel, scene_tiles, min_landmark_per_voxel) - -def display_no_tracks(landmarks, landmarks_color, landmarks_per_voxel=1, scene_tiles=3, min_landmark_per_voxel=0): - return [] - -def draw_on_images(json_display, views_id, views_path, extrinsics_all_cams, - intrinsics_all_cam, pixel_sizes_all_cams, output_folder): - """ - Plot the projection of 3D landmarks onto an image. Used for debug mostly. - """ - POINT_THINKESS = 5 - object_colors = (np.random.random([len(json_display), 3])) - color_min = 0 - color_max = 1 - - for view_id, view_path, extrinsic, intrinsic in zip(views_id, views_path, extrinsics_all_cams, intrinsics_all_cam): - try: - image = open_image(view_path) - image = np.ascontiguousarray(image) - color_min = np.amin(image) - color_max = np.amax(image) - except Exception as ex: - print("Issue with image "+view_path+" skipping:") - print(ex) - - for display_object, object_color in zip(json_display, object_colors): - try: - if display_object["type"] == "cones" or display_object["type"] == "sphere": - coordinates = display_object["coordinates"] - # landmark_projected - point_on_cam, z = camera_projection(np.asarray([coordinates], np.float32), extrinsic, intrinsic, pixel_sizes_all_cams[0]) - point_on_cam = point_on_cam[0] - # discard unseen pointss - if point_on_cam[0]<0 or point_on_cam[1]<0: - continue - if point_on_cam[0] >= image.shape[1] or point_on_cam[1] >= image.shape[0]: - continue - if z[0] <= 0: - continue - image[point_on_cam[1]-POINT_THINKESS:point_on_cam[1]+POINT_THINKESS, point_on_cam[0]-POINT_THINKESS:point_on_cam[0]+POINT_THINKESS] = object_color*(color_max-color_min)-color_min - elif display_object["type"] == "obj":#if mesh, display wireframe - import cv2 - mesh = trimesh.load(display_object["file_path"])#FIXME: opens the mesh for each view - vertices = mesh.vertices - faces = mesh.faces - #vertices associated to each face - faces_vertices = vertices[faces] - #vertices projections - projections = [camera_projection(faces_vertices[:,i], extrinsic, intrinsic, pixel_sizes_all_cams[0]) for i in range(3)] - faces_vertices_proj= np.stack([projections[i][0] for i in range(3)], axis=1) - faces_vertices_z = np.stack([projections[i][1] for i in range(3)], axis=-1) - #filter out faces that are not visible - valid_faces = ( np.all(faces_vertices_z>0, axis=-1) & - np.all(np.all(faces_vertices_proj>0, axis=-1), axis=-1) )#& - 
#np.any(np.any(faces_vertices_proj[:]>0, axis=-1), axis=-1))#FIME: finish all - triangles_to_display=faces_vertices_proj[valid_faces] - if triangles_to_display.shape[0]==0: - continue - # for triangle in triangles_to_display: - cv2.polylines(image, triangles_to_display, isClosed = True, color=(0, 0, 0)) - except Exception as e: - print("Issue with view "+view_id+", skipping :") - print(e) - image_extention = view_path.split(".")[-1] - save_image(os.path.join(output_folder, view_id+"."+image_extention), image) - -class CreateTrackingMarkers(desc.Node): - - category = 'Evaluation' - - documentation = '''This node places some objects in the scene using the landmarks of the sfm.''' - - inputs = [ - - desc.File( - name='sfmData', - label='SfmData', - description='Input SfM file.', - value=desc.Node.internalFolder, - ), - - desc.File( - name='objFile', - label='3D Object', - description='Input obj file to display (optional).', - value="", - ), - - desc.ChoiceParam( - name='track_mode', - label='Track Mode', - description='''Mode to display over the images.''', - value='display_track_cones', - values=['display_track_cones', 'display_track_spheres', 'display_no_tracks'], - exclusive=True, - ), - - desc.ChoiceParam( - name='track_param_sort_mode', - label='Sorting Mode', - description='''Sort Mode to display Track Cones.''', - value='longest', - values=['longest', 'scale'], - enabled=lambda node: node.track_mode.value=='display_track_cones' or node.track_mode.value=='display_track_spheres', - exclusive=True - ), - - #! order important for parameters - desc.IntParam( - name='param_markers_per_voxel', - label='Markers per voxels', - description=''' ''', - value=1, - range=(0, 10000, 1), - enabled=lambda node: node.track_mode.value=='display_track_cones' or node.track_mode.value=='display_track_spheres' - ), - - desc.IntParam( - name='param_voxel_grid_size', - label='Voxel Grid Size', - description='''Grid size to be used. Will only keep N landmarks per voxel.''', - value=10, - range=(0, 10000, 1), - enabled=lambda node: node.track_mode.value=='display_track_cones' or node.track_mode.value=='display_track_spheres' - ), - - desc.IntParam( - name='param_min_landmark_per_voxel', - label='Minimum landmark per voxel', - description='''Will only display landmarks if the voxel as this amount of total landmarks.''', - value=10, - range=(0, 10000, 1), - enabled=lambda node: node.track_mode.value=='display_track_cones' or node.track_mode.value=='display_track_spheres' - ), - - desc.BoolParam( - name="render", - label = "Generate 2D renders", - description='''Will render the markers directly on frames.''', - value=False, - group='', - ), - - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='''Verbosity level (fatal, error, warning, info, debug, trace).''', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - ), - ] - - outputs = [ - desc.File( - name='outputFile', - label='Output Json', - description='Output file to place track info to.', - value=os.path.join(desc.Node.internalFolder, "track_objects.json"), - ), - desc.File( - name='outputImages', - label='Output Images', - description='Output image regex if any', - value=os.path.join(desc.Node.internalFolder, "*.png"), - ), - ] - - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. 
- """ - if not chunk.node.sfmData.value: - chunk.logger.warning('No input sfmData in node InjectSfmData, skipping') - return False - return True - - def processChunk(self, chunk): - """ - Opens the dataset data. - """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - # check inputs - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts to make vizualisation") - with open(chunk.node.sfmData.value,"r") as json_file: - sfm_data = json.load(json_file) - # get landmarks (sorted by track length) - landmarks, landmarks_color = get_landmarks_from_sfm_data(sfm_data, chunk.node.track_param_sort_mode.value) - # generate json corresponding to the method - display_function = eval(chunk.node.track_mode.value) - display_options = [attribute._value for attribute in chunk.node.attributes - if attribute._enabled and attribute.name.startswith("param_")]#note: hacky but works - json_display = display_function(landmarks, landmarks_color, *display_options) - #add mesh if any - if chunk.node.objFile.value != "": - import trimesh#lazy import - #convert mesh to obj - new_mesh_file = os.path.join(os.path.dirname(chunk.node.outputFile.value), os.path.basename(chunk.node.objFile.value)+".obj") - mesh = trimesh.load(chunk.node.objFile.value) - transform = np.identity(4) - transform[1][1] = -1 - transform[2][2] = -1 - mesh.apply_transform(transform) - mesh.export(new_mesh_file) - #add mesh to json - json_display.append({"type": "obj", - "coordinates":(0,0,0), - "name": "3d reconstruction", - "file_path": new_mesh_file, - }) - # write json - with open(chunk.node.outputFile.value, "w") as json_file: - json_file.write(json.dumps(json_display, indent=4)) - - if chunk.node.render.value: - - frame_ids = [view["frameId"] for view in sfm_data["views"]] - (extrinsics_all_cams, intrinsics_all_cams, _, - _, _, pixel_sizes_all_cams) = matrices_from_sfm_data(sfm_data) - views_path = [view["path"] for view in sfm_data["views"]] - draw_on_images(json_display, frame_ids, views_path, extrinsics_all_cams, - intrinsics_all_cams, pixel_sizes_all_cams, os.path.dirname(chunk.node.outputFile.value)) - - chunk.logger.info('Vizualisation done') - finally: - chunk.logManager.end() - - -# # #idea, use track length, texture, clustering, also viz normal and plane, planetlet -# def build_knn_landmarks(landmarks, N): -# """ -# Knn for landmarks, make the nn model and returns a matrix containing the nn indices -# """ -# from annoy import AnnoyIndex -# f = 3 # Length of item vector that will be indexed -# t = AnnoyIndex(f, 'euclidean') -# for i, landmark in enumerate(landmarks): -# t.add_item(i, landmark) -# t.build(10) # 10 trees -# #N nn -# neareast_ns =[] -# for i, landmark in enumerate(landmarks): -# neareast_ns.append(t.get_nns_by_item(i, N, search_k=-1, include_distances=False)) -# return neareast_ns, t - diff --git a/mrrs/blender/Render360.py b/mrrs/blender/Render360.py deleted file mode 100644 index 50572b9..0000000 --- a/mrrs/blender/Render360.py +++ /dev/null @@ -1,51 +0,0 @@ - - -__version__ = "1.1" - -from meshroom.core import desc -import os - -DEFAULT_RENDER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../blender/render360.py')) -# COMMAND_PREFIX = "rez env blender-3.1 --" -class Render360(desc.CommandLineNode): - commandLine = 'blender -b -P {scriptValue} -- {objectFileValue} {outputFolderValue} {renderStepsValue}' - category = 'Evaluation' - documentation = 'This nodes renders an object as in a turntable' - gpu = desc.Level.INTENSIVE - inputs = [ - desc.File( - 
name='script', - label='Script', - description='Python script to render markers.', - value=DEFAULT_RENDER_SCRIPT, - ), - desc.File( - name='objectFile', - label='objectFile', - description='Object File.', - value='', - ), - - desc.IntParam( - name='renderSteps', - label='renderSteps', - description='Render steps.', - value=64, - range=(0, 1000000, 1), - ), - ] - outputs = [ - desc.File( - name='outputFolder', - label='Folder', - description='Output folder for generated images.', - value=desc.Node.internalFolder, - ), - - desc.File( - name='outputImages', - label='outputImages', - description='Output generated images.', - value=os.path.join(desc.Node.internalFolder, "*.png"), - ), - ] diff --git a/mrrs/blender/RenderMesh.py b/mrrs/blender/RenderMesh.py index 1b343c4..c5edeb8 100644 --- a/mrrs/blender/RenderMesh.py +++ b/mrrs/blender/RenderMesh.py @@ -3,7 +3,7 @@ from meshroom.core import desc import os -RENDER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../blender/render_mesh.py')) +RENDER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), './scripts/render_mesh.py')) class RenderMesh(desc.CommandLineNode): commandLine = 'blender -b -P {scriptValue} -- \ diff --git a/mrrs/blender/RenderOverlay.py b/mrrs/blender/RenderOverlay.py deleted file mode 100644 index 552ec3d..0000000 --- a/mrrs/blender/RenderOverlay.py +++ /dev/null @@ -1,55 +0,0 @@ -__version__ = "1.1" - -from meshroom.core import desc -import os - -DEFAULT_RENDER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../blender/render_overlay_markers.py')) - -class RenderOverlay(desc.CommandLineNode): - commandLine = 'blender -b -P {scriptValue} -- \ - {markersValue} {sizeFactorValue} {sfmDataValue} {outputFolderValue}' - category = 'Evaluation' - documentation = 'This nodes creates 3D objects in a Blender scene and render them on top of the corresponding views' - inputs = [ - desc.File( - name='script', - label='Script', - description='Python script to render markers.', - value=DEFAULT_RENDER_SCRIPT, - ), - desc.File( - name='markers', - label='Markers', - description='3D markers to render.', - value='', - ), - desc.FloatParam( - name='sizeFactor', - label='Size Factor', - description='Marker size factor.', - value=1.0, - range=(0.0, 10.0, 0.1), - ), - desc.File( - name='sfmData', - label='SfM Data', - description='Views, intrinsincs and estimated poses.', - value='', - ), - ] - outputs = [ - desc.File( - name='outputFolder', - label='Folder', - description='Output folder for generated images.', - value=desc.Node.internalFolder, - ), - - desc.File( - name='overlay', - label='Overlay', - description='Rendered views with markers overlay.', - semantic='image', - value=desc.Node.internalFolder + '.jpg', - ), - ] diff --git a/mrrs/blender/alembic_convert.py b/mrrs/blender/scripts/alembic_convert.py similarity index 100% rename from mrrs/blender/alembic_convert.py rename to mrrs/blender/scripts/alembic_convert.py diff --git a/mrrs/blender/exctract_ground_truth_ptut.py b/mrrs/blender/scripts/exctract_ground_truth_ptut.py similarity index 100% rename from mrrs/blender/exctract_ground_truth_ptut.py rename to mrrs/blender/scripts/exctract_ground_truth_ptut.py diff --git a/mrrs/blender/extract_ground_truth.py b/mrrs/blender/scripts/extract_ground_truth.py similarity index 100% rename from mrrs/blender/extract_ground_truth.py rename to mrrs/blender/scripts/extract_ground_truth.py diff --git a/mrrs/blender/render_mesh.py 
b/mrrs/blender/scripts/render_mesh.py similarity index 100% rename from mrrs/blender/render_mesh.py rename to mrrs/blender/scripts/render_mesh.py diff --git a/mrrs/colmap/AutomaticReconstructor.py b/mrrs/colmap/AutomaticReconstructor.py index 6fec91a..8c6f4c9 100644 --- a/mrrs/colmap/AutomaticReconstructor.py +++ b/mrrs/colmap/AutomaticReconstructor.py @@ -1,7 +1,6 @@ __version__ = "4.0" import os -from sys import platform from meshroom.core import desc from . import COLMAP @@ -19,7 +18,6 @@ class ColmapAutomaticReconstructor(desc.CommandLineNode): description='''Path to images.''', value='', ), - ] outputs = [ diff --git a/mrrs/colmap/__init__.py b/mrrs/colmap/__init__.py index 7267b4d..257d601 100644 --- a/mrrs/colmap/__init__.py +++ b/mrrs/colmap/__init__.py @@ -1,6 +1,11 @@ import os from sys import platform +from shutil import which + +if which('colmap') is None: + print("[warning] mrrs: 'colmap' command not found, colmap nodes cannot be computed: \n") + COLMAP="" if platform == "linux" or platform == "linux2": if 'REZ_ENV' in os.environ: From 49d6fd8029c600fe51b11f0e5675638abe7968b5 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 13:44:34 +0200 Subject: [PATCH 18/32] added blender --- meshroomPlugin.json | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/meshroomPlugin.json b/meshroomPlugin.json index cfdf7ae..cb97016 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -5,8 +5,8 @@ "pipelineFolder":"mrrs/3DR_benchmark/pipelines" }, { - "pluginName":"Gaussian_Splatting", - "nodesFolder":"mrrs/gaussian_splatting" + "pluginName":"Blender", + "nodesFolder":"mrrs/blender" }, { "pluginName":"Colmap", @@ -14,6 +14,10 @@ "pipelineFolder":"mrrs/colmap/pipelines" }, { + "pluginName":"Gaussian_Splatting", + "nodesFolder":"mrrs/gaussian_splatting" + }, + { "pluginName":"Deep_Depth_Maps", "nodesFolder":"mrrs/deep_depth_map" }, From 183ef3b1cbf2cac3e68619da6b8012f89bc91b11 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 13:44:40 +0200 Subject: [PATCH 19/32] cosmetics --- mrrs/3DR_benchmark/DepthMapComparison.py | 1 - mrrs/blender/RenderMesh.py | 2 +- mrrs/blender/SyntheticDataset.py | 2 +- mrrs/colmap/AutomaticReconstructor.py | 2 +- mrrs/colmap/Colmap2MeshroomSfmConvertions.py | 3 +- mrrs/colmap/DelaunayMesher.py | 2 +- mrrs/colmap/FeatureExtraction.py | 2 +- mrrs/colmap/FeatureMatching.py | 2 +- mrrs/colmap/ImageUndistorder.py | 11 +-- mrrs/colmap/ImportColmapDepthMaps.py | 4 +- mrrs/colmap/Mapper.py | 75 +------------------- mrrs/colmap/Meshroom2ColmapSfmConvertions.py | 4 +- mrrs/colmap/PatchMatchStereo.py | 8 +-- mrrs/colmap/PoissonMesher.py | 11 +-- mrrs/colmap/StereoFusion.py | 9 +-- 15 files changed, 17 insertions(+), 121 deletions(-) diff --git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index 35df2fd..5a6f5b8 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -6,7 +6,6 @@ class DepthMapComparison(CondaNode): - # size = desc.DynamicNodeSize('inputSfM') category = 'MRRS - Benchmark' documentation = '''For each camera, compare its depth maps to a given ground truth. 
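
Worth a note on the colmap availability check introduced in mrrs/colmap/__init__.py a few hunks above: the module now looks the executable up at import time and only prints a warning when it is absent, so the node classes still load and the failure only surfaces once a command line is actually run. Reduced to its core (a sketch; the platform- and rez-specific resolution in that module is not reproduced here):

    from shutil import which

    # locate the colmap executable once at import time; empty string when missing,
    # mirroring the warning branch shown in the hunk above
    COLMAP = which("colmap") or ""
    if not COLMAP:
        print("[warning] mrrs: 'colmap' command not found, colmap nodes cannot be computed")
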
diff --git a/mrrs/blender/RenderMesh.py b/mrrs/blender/RenderMesh.py index c5edeb8..881529c 100644 --- a/mrrs/blender/RenderMesh.py +++ b/mrrs/blender/RenderMesh.py @@ -8,7 +8,7 @@ class RenderMesh(desc.CommandLineNode): commandLine = 'blender -b -P {scriptValue} -- \ {cameras} {model} {renderMode} {output}' - category = 'Evaluation' + category = 'MRRS - Blender' gpu = desc.Level.INTENSIVE documentation = 'This nodes creates 3D objects in a Blender scene and render them on top of the corresponding views' inputs = [ diff --git a/mrrs/blender/SyntheticDataset.py b/mrrs/blender/SyntheticDataset.py index ac541fe..c92b55d 100644 --- a/mrrs/blender/SyntheticDataset.py +++ b/mrrs/blender/SyntheticDataset.py @@ -22,7 +22,7 @@ class SyntheticDataset(desc.InitNode, desc.CommandLineNode): size = SyntheticDatasetNodeSize('') - category = 'Evaluation' + category = 'MRRS - Blender' documentation = 'Utility node to load an evaluation dataset from a given folder.' inputs = [ diff --git a/mrrs/colmap/AutomaticReconstructor.py b/mrrs/colmap/AutomaticReconstructor.py index 8c6f4c9..180bb56 100644 --- a/mrrs/colmap/AutomaticReconstructor.py +++ b/mrrs/colmap/AutomaticReconstructor.py @@ -8,7 +8,7 @@ class ColmapAutomaticReconstructor(desc.CommandLineNode): commandLine = COLMAP+' automatic_reconstructor {allParams}' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/Colmap2MeshroomSfmConvertions.py b/mrrs/colmap/Colmap2MeshroomSfmConvertions.py index 036c578..17b709d 100644 --- a/mrrs/colmap/Colmap2MeshroomSfmConvertions.py +++ b/mrrs/colmap/Colmap2MeshroomSfmConvertions.py @@ -6,6 +6,7 @@ import os import numpy as np + from meshroom.core import desc @@ -231,7 +232,7 @@ class Colmap2MeshroomSfmConvertion(desc.Node): Converts colmap's sfm infos into meshroom format """ - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''Converts colmap's sfm infos into meshroom format''' inputs = [ diff --git a/mrrs/colmap/DelaunayMesher.py b/mrrs/colmap/DelaunayMesher.py index d339b88..e159978 100644 --- a/mrrs/colmap/DelaunayMesher.py +++ b/mrrs/colmap/DelaunayMesher.py @@ -13,7 +13,7 @@ class DelaunayMesher(desc.CommandLineNode): commandLine = COLMAP+' delaunay_mesher {allParams} --input_type dense' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/FeatureExtraction.py b/mrrs/colmap/FeatureExtraction.py index 9530e62..77c9b13 100644 --- a/mrrs/colmap/FeatureExtraction.py +++ b/mrrs/colmap/FeatureExtraction.py @@ -11,7 +11,7 @@ class ColmapFeatureExtraction(desc.CommandLineNode): commandLine = COLMAP+' feature_extractor {allParams}' #FIXME --ImageReader.single_camera 1 - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/FeatureMatching.py b/mrrs/colmap/FeatureMatching.py index a3992a3..f961941 100644 --- a/mrrs/colmap/FeatureMatching.py +++ b/mrrs/colmap/FeatureMatching.py @@ -10,7 +10,7 @@ class ColmapFeatureMatching(desc.CommandLineNode): commandLine = COLMAP+' exhaustive_matcher {allParams}' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/ImageUndistorder.py b/mrrs/colmap/ImageUndistorder.py index 326b86c..83daa75 100644 --- a/mrrs/colmap/ImageUndistorder.py +++ b/mrrs/colmap/ImageUndistorder.py @@ -1,15 +1,6 @@ -# $ colmap image_undistorter \ -# --image_path $DATASET_PATH/images \ -# --input_path $DATASET_PATH/sparse/0 \ -# --output_path $DATASET_PATH/dense \ -# 
--output_type COLMAP \ -# --max_image_size 2000 - __version__ = "1.1" import os -from sys import platform - from meshroom.core import desc from . import COLMAP @@ -17,7 +8,7 @@ class ColmapImageUndistorder(desc.CommandLineNode): commandLine = COLMAP+' image_undistorter {allParams}' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/ImportColmapDepthMaps.py b/mrrs/colmap/ImportColmapDepthMaps.py index a32d86a..823024b 100644 --- a/mrrs/colmap/ImportColmapDepthMaps.py +++ b/mrrs/colmap/ImportColmapDepthMaps.py @@ -8,8 +8,6 @@ import os import json -import re -import cv2 import glob from meshroom.core import desc @@ -36,7 +34,7 @@ def read_array(path): class ImportColmapDepthMaps(desc.Node): - category = 'Meshroom Research' + category = 'MRRS - Colmap' documentation = '''''' diff --git a/mrrs/colmap/Mapper.py b/mrrs/colmap/Mapper.py index dae77cd..e7d0834 100644 --- a/mrrs/colmap/Mapper.py +++ b/mrrs/colmap/Mapper.py @@ -1,80 +1,7 @@ -# C:\Dev>colmap mapper -h - -# -h [ --help ] -# --random_seed arg (=0) -# --log_to_stderr arg (=0) -# --log_level arg (=2) -# --project_path arg -# --database_path arg -# --image_path arg -# --input_path arg -# --output_path arg -# --image_list_path arg -# --Mapper.min_num_matches arg (=15) -# --Mapper.ignore_watermarks arg (=0) -# --Mapper.multiple_models arg (=1) -# --Mapper.max_num_models arg (=50) -# --Mapper.max_model_overlap arg (=20) -# --Mapper.min_model_size arg (=10) -# --Mapper.init_image_id1 arg (=-1) -# --Mapper.init_image_id2 arg (=-1) -# --Mapper.init_num_trials arg (=200) -# --Mapper.extract_colors arg (=1) -# --Mapper.num_threads arg (=-1) -# --Mapper.min_focal_length_ratio arg (=0.10000000000000001) -# --Mapper.max_focal_length_ratio arg (=10) -# --Mapper.max_extra_param arg (=1) -# --Mapper.ba_refine_focal_length arg (=1) -# --Mapper.ba_refine_principal_point arg (=0) -# --Mapper.ba_refine_extra_params arg (=1) -# --Mapper.ba_min_num_residuals_for_multi_threading arg (=50000) -# --Mapper.ba_local_num_images arg (=6) -# --Mapper.ba_local_function_tolerance arg (=0) -# --Mapper.ba_local_max_num_iterations arg (=25) -# --Mapper.ba_global_use_pba arg (=0) -# --Mapper.ba_global_pba_gpu_index arg (=-1) -# --Mapper.ba_global_images_ratio arg (=1.1000000000000001) -# --Mapper.ba_global_points_ratio arg (=1.1000000000000001) -# --Mapper.ba_global_images_freq arg (=500) -# --Mapper.ba_global_points_freq arg (=250000) -# --Mapper.ba_global_function_tolerance arg (=0) -# --Mapper.ba_global_max_num_iterations arg (=50) -# --Mapper.ba_global_max_refinements arg (=5) -# --Mapper.ba_global_max_refinement_change arg (=0.00050000000000000001) -# --Mapper.ba_local_max_refinements arg (=2) -# --Mapper.ba_local_max_refinement_change arg (=0.001) -# --Mapper.snapshot_path arg -# --Mapper.snapshot_images_freq arg (=0) -# --Mapper.fix_existing_images arg (=0) -# --Mapper.init_min_num_inliers arg (=100) -# --Mapper.init_max_error arg (=4) -# --Mapper.init_max_forward_motion arg (=0.94999999999999996) -# --Mapper.init_min_tri_angle arg (=16) -# --Mapper.init_max_reg_trials arg (=2) -# --Mapper.abs_pose_max_error arg (=12) -# --Mapper.abs_pose_min_num_inliers arg (=30) -# --Mapper.abs_pose_min_inlier_ratio arg (=0.25) -# --Mapper.filter_max_reproj_error arg (=4) -# --Mapper.filter_min_tri_angle arg (=1.5) -# --Mapper.max_reg_trials arg (=3) -# --Mapper.local_ba_min_tri_angle arg (=6) -# --Mapper.tri_max_transitivity arg (=1) -# --Mapper.tri_create_max_angle_error arg (=2) -# --Mapper.tri_continue_max_angle_error 
arg (=2) -# --Mapper.tri_merge_max_reproj_error arg (=4) -# --Mapper.tri_complete_max_reproj_error arg (=4) -# --Mapper.tri_complete_max_transitivity arg (=5) -# --Mapper.tri_re_max_angle_error arg (=5) -# --Mapper.tri_re_min_ratio arg (=0.20000000000000001) -# --Mapper.tri_re_max_trials arg (=1) -# --Mapper.tri_min_angle arg (=1.5) -# --Mapper.tri_ignore_two_view_tracks arg (=1) - __version__ = "2.0" import os import shutil -from sys import platform from meshroom.core import desc from . import COLMAP @@ -82,7 +9,7 @@ class ColmapMapper(desc.CommandLineNode): commandLine = COLMAP+' mapper {allParams}'# --output_type TXT - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py index 491ab49..8dfa773 100644 --- a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py +++ b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py @@ -7,13 +7,11 @@ from mrrs.core.ios import open_image, save_image from mrrs.core.utils import cv2_resize_with_pad -from . import COLMAP - class Meshroom2ColmapSfmConvertions(desc.CommandLineNode): commandLine = 'aliceVision_exportColmap -i {preparedSfmValue} -o {outputValue} ' size = desc.DynamicNodeSize('input') - category = 'Colmap' + category = 'MRRS - Colmap' documentation = ''' ''' inputs = [ diff --git a/mrrs/colmap/PatchMatchStereo.py b/mrrs/colmap/PatchMatchStereo.py index efa9306..cfe4372 100644 --- a/mrrs/colmap/PatchMatchStereo.py +++ b/mrrs/colmap/PatchMatchStereo.py @@ -1,13 +1,7 @@ -# $ colmap patch_match_stereo \ -# --workspace_path $DATASET_PATH/dense \ -# --workspace_format COLMAP \ -# --PatchMatchStereo.geom_consistency true - __version__ = "2.0" import shutil import os -from sys import platform from meshroom.core import desc from . import COLMAP @@ -15,7 +9,7 @@ class PatchMatchStereo(desc.CommandLineNode): commandLine = COLMAP+' patch_match_stereo {allParams}' gpu = desc.Level.INTENSIVE - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/PoissonMesher.py b/mrrs/colmap/PoissonMesher.py index e404a88..abdfa6b 100644 --- a/mrrs/colmap/PoissonMesher.py +++ b/mrrs/colmap/PoissonMesher.py @@ -1,22 +1,17 @@ -# $ colmap stereo_fusion \ -# --workspace_path $DATASET_PATH/dense \ -# --workspace_format COLMAP \ -# --input_type geometric \ -# --output_mesh $DATASET_PATH/dense/fused.ply - __version__ = "2.0" import os from meshroom.core import desc -from . import COLMAP import trimesh + +from . import COLMAP from mrrs.core.geometry import CG_CV_MAT44 class PoissonMesher(desc.CommandLineNode): commandLine = COLMAP+' poisson_mesher {input_path} --PoissonMeshing.trim {trimValue} --output_path {output_meshValue}' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ diff --git a/mrrs/colmap/StereoFusion.py b/mrrs/colmap/StereoFusion.py index d6c9d46..c2dabc0 100644 --- a/mrrs/colmap/StereoFusion.py +++ b/mrrs/colmap/StereoFusion.py @@ -1,14 +1,7 @@ -# $ colmap stereo_fusion \ -# --workspace_path $DATASET_PATH/dense \ -# --workspace_format COLMAP \ -# --input_type geometric \ -# --output_path $DATASET_PATH/dense/fused.ply - __version__ = "2.0" import shutil import os -from sys import platform from meshroom.core import desc from . 
import COLMAP @@ -16,7 +9,7 @@ class StereoFusion(desc.CommandLineNode): commandLine = COLMAP+' stereo_fusion {allParams}' - category = 'Colmap' + category = 'MRRS - Colmap' documentation = '''''' inputs = [ From 43ec97cf63f31a2b73068c4c1638c40a45df092b Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 14:42:04 +0200 Subject: [PATCH 20/32] added try catch to avoid meshroom import crash --- mrrs/3DR_benchmark/datasets/__init__.py | 116 ++++++++++++------------ 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/mrrs/3DR_benchmark/datasets/__init__.py b/mrrs/3DR_benchmark/datasets/__init__.py index 784dea2..183c627 100644 --- a/mrrs/3DR_benchmark/datasets/__init__.py +++ b/mrrs/3DR_benchmark/datasets/__init__.py @@ -1,65 +1,65 @@ -from mrrs.core.geometry import is_rotation_mat +try: + from mrrs.core.geometry import is_rotation_mat -from .eth3d import open_dataset as open_dataset_eth3d -from .baptiste import open_dataset as open_dataset_baptiste -from .blendedMVG import open_dataset as open_dataset_blended -from .dtu import open_dataset as open_dataset_dtu -from .alab import open_dataset as open_dataset_alab -from .nerf import open_dataset as open_dataset_nerf + from .eth3d import open_dataset as open_dataset_eth3d + from .baptiste import open_dataset as open_dataset_baptiste + from .blendedMVG import open_dataset as open_dataset_blended + from .dtu import open_dataset as open_dataset_dtu + from .alab import open_dataset as open_dataset_alab + from .nerf import open_dataset as open_dataset_nerf -def load_dataset(sfm_data, dataset_type): - """ - Loads the gt data corresponding to input images in sfm_data. - """ - #sort by view name (handy for several of the dataset) - sfm_data["views"]=sorted(sfm_data["views"], key=lambda v:int(v["frameId"])) + def load_dataset(sfm_data, dataset_type): + """ + Loads the gt data corresponding to input images in sfm_data. 
+ """ + #sort by view name (handy for several of the dataset) + sfm_data["views"]=sorted(sfm_data["views"], key=lambda v:int(v["frameId"])) - # data = { - # # Initialize lists to store scene images, calibrations, depths and masks (one per view) - # "images_sizes":[], - # "depth_maps":[], - # "masks":[], - # "extrinsics":[], - # "intrinsics":[], - # "sensor_size":35, #note, by default the sensor size is set to 35mm - # # Initialise geometry (one per scene) - # "mesh":None, - # } + # data = { + # # Initialize lists to store scene images, calibrations, depths and masks (one per view) + # "images_sizes":[], + # "depth_maps":[], + # "masks":[], + # "extrinsics":[], + # "intrinsics":[], + # "sensor_size":35, #note, by default the sensor size is set to 35mm + # # Initialise geometry (one per scene) + # "mesh":None, + # } - # Load data - if dataset_type == "blendedMVG": - print("**Importing blendedMVG data") - data = open_dataset_blended(sfm_data) - elif dataset_type == "DTU": - print("**Importing DTU data") - data = open_dataset_dtu(sfm_data) - elif dataset_type == "ETH3D": - print("**Importing ETH3D data") - data = open_dataset_eth3d(sfm_data) - elif dataset_type == "baptiste": - print("**Importing Baptiste data") - data = open_dataset_baptiste(sfm_data) - elif dataset_type == "alab": - print("**Importing alab data") - data = open_dataset_alab(sfm_data) - elif dataset_type == "NERF": - print("**Importing NERF data") - data = open_dataset_nerf(sfm_data) - else: - raise RuntimeError("Dataset type not supported") - - #sanity check rotation matrix, as its a common error - for e in data["extrinsics"]: - if not is_rotation_mat(e[0:3,0:3]): - raise ValueError("Issue with rotation matrix") + # Load data + if dataset_type == "blendedMVG": + print("**Importing blendedMVG data") + data = open_dataset_blended(sfm_data) + elif dataset_type == "DTU": + print("**Importing DTU data") + data = open_dataset_dtu(sfm_data) + elif dataset_type == "ETH3D": + print("**Importing ETH3D data") + data = open_dataset_eth3d(sfm_data) + elif dataset_type == "baptiste": + print("**Importing Baptiste data") + data = open_dataset_baptiste(sfm_data) + elif dataset_type == "alab": + print("**Importing alab data") + data = open_dataset_alab(sfm_data) + elif dataset_type == "NERF": + print("**Importing NERF data") + data = open_dataset_nerf(sfm_data) + else: + raise RuntimeError("Dataset type not supported") + + #sanity check rotation matrix, as its a common error + for e in data["extrinsics"]: + if not is_rotation_mat(e[0:3,0:3]): + raise ValueError("Issue with rotation matrix") - #if sensor size, not specified, assumes 35mm - if "sensor_size" not in data : - data["sensor_size"] = 35 - print("Sensor size set to default (35mm)") + #if sensor size, not specified, assumes 35mm + if "sensor_size" not in data : + data["sensor_size"] = 35 + print("Sensor size set to default (35mm)") - return data, sfm_data - -if __name__ == "__main__": - pass \ No newline at end of file + return data, sfm_data +except: + pass From 9a96cb5cf74fbea337ec956c0510179f71c88ea8 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 14:43:39 +0200 Subject: [PATCH 21/32] added deep feature --- meshroomPlugin.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/meshroomPlugin.json b/meshroomPlugin.json index cb97016..2adaf7c 100644 --- a/meshroomPlugin.json +++ b/meshroomPlugin.json @@ -22,6 +22,10 @@ "nodesFolder":"mrrs/deep_depth_map" }, { + "pluginName":"Deep_Feature_Matching", + "nodesFolder":"mrrs/deep_feature_matching" + }, + { 
"pluginName":"Nerf_studio", "nodesFolder":"mrrs/nerf" }, From 22580b9f5df88aa5e0cfb157c46f8c72b3706050 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 14:48:25 +0200 Subject: [PATCH 22/32] added gpu flag --- mrrs/colmap/PatchMatchStereo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mrrs/colmap/PatchMatchStereo.py b/mrrs/colmap/PatchMatchStereo.py index cfe4372..1bd30b3 100644 --- a/mrrs/colmap/PatchMatchStereo.py +++ b/mrrs/colmap/PatchMatchStereo.py @@ -7,7 +7,7 @@ from . import COLMAP class PatchMatchStereo(desc.CommandLineNode): - commandLine = COLMAP+' patch_match_stereo {allParams}' + commandLine = COLMAP+' patch_match_stereo {allParams} --PatchMatchStereo.gpu_index 0' gpu = desc.Level.INTENSIVE category = 'MRRS - Colmap' documentation = '''''' From 99512ae2758a8843d7197eaaf42191c65a850bc2 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 18 Jun 2024 14:49:16 +0200 Subject: [PATCH 23/32] added flag --- mrrs/colmap/DelaunayMesher.py | 6 ------ mrrs/colmap/FeatureExtraction.py | 1 - mrrs/colmap/FeatureMatching.py | 1 - 3 files changed, 8 deletions(-) diff --git a/mrrs/colmap/DelaunayMesher.py b/mrrs/colmap/DelaunayMesher.py index e159978..16918d9 100644 --- a/mrrs/colmap/DelaunayMesher.py +++ b/mrrs/colmap/DelaunayMesher.py @@ -1,9 +1,3 @@ -# $ colmap stereo_fusion \ -# --workspace_path $DATASET_PATH/dense \ -# --workspace_format COLMAP \ -# --input_type geometric \ -# --output_path $DATASET_PATH/dense/fused.ply - __version__ = "2.0" import os diff --git a/mrrs/colmap/FeatureExtraction.py b/mrrs/colmap/FeatureExtraction.py index 77c9b13..38a71b8 100644 --- a/mrrs/colmap/FeatureExtraction.py +++ b/mrrs/colmap/FeatureExtraction.py @@ -1,6 +1,5 @@ __version__ = "1.1" - import os import json import shutil diff --git a/mrrs/colmap/FeatureMatching.py b/mrrs/colmap/FeatureMatching.py index f961941..001b724 100644 --- a/mrrs/colmap/FeatureMatching.py +++ b/mrrs/colmap/FeatureMatching.py @@ -2,7 +2,6 @@ import os import shutil -from sys import platform from meshroom.core import desc from . 
import COLMAP From 80a3819b2684c00e5cd8b2174c4209ebefc9b539 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Thu, 25 Jul 2024 11:43:17 +0200 Subject: [PATCH 24/32] port colmap --- README.md | 2 +- mrrs/3DR_benchmark/CalibrationComparison.py | 5 +- mrrs/3DR_benchmark/DepthMapComparison.py | 5 +- mrrs/3DR_benchmark/LoadDataset.py | 5 +- mrrs/3DR_benchmark/MeshComparison.py | 5 +- mrrs/3DR_benchmark/general_env.yaml | 2 + .../chamfer_distance/environnement.yaml | 1 + mrrs/colmap/Colmap2MeshroomSfmConvertions.py | 266 +---------------- mrrs/colmap/DelaunayMesher.py | 2 +- mrrs/colmap/ImportColmapDepthMaps.py | 119 +------- mrrs/colmap/Meshroom2ColmapSfmConvertions.py | 24 +- mrrs/colmap/PatchMatchStereo.py | 2 +- mrrs/colmap/PoissonMesher.py | 13 +- mrrs/colmap/StereoFusion.py | 2 +- .../colmap_2_meshroom_sfm_convertion.py | 277 ++++++++++++++++++ mrrs/colmap/env.yaml | 16 + mrrs/colmap/import_colmap_depth_maps.py | 91 ++++++ mrrs/deep_depth_map/VizMVSNet.py | 28 +- .../DeepFeatureExtraction.py | 4 +- .../deep_feature_matching/LightGlueMatcher.py | 4 +- mrrs/deep_feature_matching/LoftrMatcher.py | 4 +- mrrs/gaussian_splatting/ios.py | 64 ++++ mrrs/nerf/nerfstudio.py | 4 +- mrrs/nerf/nerfstudio_export.py | 4 +- mrrs/reality_capture/ExportXMP.py | 4 +- mrrs/reality_capture/ImportXMP.py | 4 +- mrrs/utils/ComputeNormals.py | 107 ------- mrrs/utils/DepthMapTransform.py | 211 ++++++------- mrrs/utils/ExecuteCmdConda.py | 47 --- mrrs/utils/InjectSfmData.py | 94 +++--- mrrs/utils/MeshTransform.py | 49 ++-- mrrs/utils/Seq2Video.py | 49 ---- mrrs/utils/utils_env.yaml | 15 + 33 files changed, 710 insertions(+), 819 deletions(-) create mode 100644 mrrs/colmap/colmap_2_meshroom_sfm_convertion.py create mode 100644 mrrs/colmap/env.yaml create mode 100644 mrrs/colmap/import_colmap_depth_maps.py create mode 100644 mrrs/gaussian_splatting/ios.py delete mode 100644 mrrs/utils/ComputeNormals.py delete mode 100644 mrrs/utils/ExecuteCmdConda.py delete mode 100644 mrrs/utils/Seq2Video.py create mode 100644 mrrs/utils/utils_env.yaml diff --git a/README.md b/README.md index 26f9f71..1008969 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,7 @@ Contributions to Meshroom-Research are welcomed! Here's a quick overview of the - `mrrs/`: Contains the code and the nodes related to a plugin feature. - `mrrs/meshrooPlugin.json`: Contains the list of plugins in this collection. -Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom's repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced new types of node (eg. CondaNode and DockerNode), which automates environment management for your convenience. +Utilize Meshroom's nodal UI for seamless integration, and refer to the [Meshroom's repo](https://github.com/alicevision/Meshroom) for creating custom nodes. We've introduced new types of node (eg. PluginNode and DockerNode), which automates environment management for your convenience. See meshroom's [plugin documentation](https://github.com/alicevision/Meshroom/tree/dev/plugin_system/meshroom/core) to leanrn how to make your own plugins. 
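
Note on the migration applied by the hunks below: every former CondaNode becomes a PluginCommandLineNode that declares its runtime environment explicitly through envType/envFile. A minimal sketch of the pattern, assuming only the plugin-branch API visible in these diffs (the node name, script name and yaml file are placeholders, not files from this repo):

    import os
    from meshroom.core import desc
    from meshroom.core.plugin import PluginCommandLineNode, EnvType

    class MyBenchmarkNode(PluginCommandLineNode):     # hypothetical example node
        category = 'MRRS - Example'
        documentation = '''Runs a wrapped script inside the environment described by envFile.'''
        # the wrapped script receives the node parameters on its command line
        commandLine = 'python "' + os.path.join(os.path.dirname(__file__), "my_script.py") + '" {allParams}'
        envType = EnvType.CONDA            # tells the plugin system how to build/resolve the env
        envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml")

        inputs = [
            desc.File(name="inputSfm", label="Input", description="SfMData file.", value=""),
        ]
        outputs = [
            desc.File(name="output", label="Output", description="Output folder.", value=desc.Node.internalFolder),
        ]
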
diff --git a/mrrs/3DR_benchmark/CalibrationComparison.py b/mrrs/3DR_benchmark/CalibrationComparison.py index 387b717..efb8cbf 100644 --- a/mrrs/3DR_benchmark/CalibrationComparison.py +++ b/mrrs/3DR_benchmark/CalibrationComparison.py @@ -7,9 +7,9 @@ import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType -class CalibrationComparison(CondaNode): +class CalibrationComparison(PluginCommandLineNode): category = 'MRRS - Benchmark' documentation = '''For each camera, compare its estimated parameters with a given groud truth.''' @@ -17,6 +17,7 @@ class CalibrationComparison(CondaNode): commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "calibration_comparison.py")+'" {allParams}' envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + envType = EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/3DR_benchmark/DepthMapComparison.py b/mrrs/3DR_benchmark/DepthMapComparison.py index 5a6f5b8..d76cb5c 100644 --- a/mrrs/3DR_benchmark/DepthMapComparison.py +++ b/mrrs/3DR_benchmark/DepthMapComparison.py @@ -2,9 +2,9 @@ import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType -class DepthMapComparison(CondaNode): +class DepthMapComparison(PluginCommandLineNode): category = 'MRRS - Benchmark' @@ -17,6 +17,7 @@ class DepthMapComparison(CondaNode): commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "depth_map_comparison.py")+'" {allParams}' envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + envType = EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/3DR_benchmark/LoadDataset.py b/mrrs/3DR_benchmark/LoadDataset.py index 093d7c2..2b4dca2 100644 --- a/mrrs/3DR_benchmark/LoadDataset.py +++ b/mrrs/3DR_benchmark/LoadDataset.py @@ -3,14 +3,15 @@ import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType -class LoadDataset(CondaNode): +class LoadDataset(PluginCommandLineNode): category = 'MRRS - Benchmark' documentation = '''Util node to open datasets with different data from the images in the .sfm''' envFile = os.path.join(os.path.dirname(__file__), "general_env.yaml") + envType = EnvType.CONDA commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "load_dataset.py")+'" {allParams}' diff --git a/mrrs/3DR_benchmark/MeshComparison.py b/mrrs/3DR_benchmark/MeshComparison.py index 73439cb..aaa2067 100644 --- a/mrrs/3DR_benchmark/MeshComparison.py +++ b/mrrs/3DR_benchmark/MeshComparison.py @@ -1,11 +1,11 @@ __version__ = "1.0" import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType from .metrics.chamfer_distance import ENV_FILE -class MeshComparison(CondaNode): +class MeshComparison(PluginCommandLineNode): gpu = desc.Level.NONE @@ -15,6 +15,7 @@ class MeshComparison(CondaNode): commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "metrics", "chamfer_distance", "eval_pcd.py")+'" {allParams}' envFile=ENV_FILE + envType = EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/3DR_benchmark/general_env.yaml b/mrrs/3DR_benchmark/general_env.yaml index fd4d02a..ce24268 100644 --- a/mrrs/3DR_benchmark/general_env.yaml +++ b/mrrs/3DR_benchmark/general_env.yaml @@ -10,5 +10,7 @@ dependencies: - py-openimageio - pip - pip: + - trimesh + - psutil #needed by meshroom #- 
git+https://github.com/alicevision/MeshroomResearch.git - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # diff --git a/mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml b/mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml index 76602ae..45481de 100644 --- a/mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml +++ b/mrrs/3DR_benchmark/metrics/chamfer_distance/environnement.yaml @@ -13,6 +13,7 @@ dependencies: - trimesh - igl - matplotlib +- psutil # - pip # - pip: # - pyoctree diff --git a/mrrs/colmap/Colmap2MeshroomSfmConvertions.py b/mrrs/colmap/Colmap2MeshroomSfmConvertions.py index 17b709d..82593fb 100644 --- a/mrrs/colmap/Colmap2MeshroomSfmConvertions.py +++ b/mrrs/colmap/Colmap2MeshroomSfmConvertions.py @@ -1,233 +1,10 @@ __version__ = "2.0" -import collections -import struct -import json import os - -import numpy as np - from meshroom.core import desc +from meshroom.core.plugin import PluginCommandLineNode, EnvType - -#from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py -#TODO: make repo import -CameraModel = collections.namedtuple( - "CameraModel", ["model_id", "model_name", "num_params"]) -Camera = collections.namedtuple( - "Camera", ["id", "model", "width", "height", "params"]) -BaseImage = collections.namedtuple( - "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) - -def qvec2rotmat(qvec): - return np.array([ - [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, - 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], - 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], - [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], - 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, - 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], - [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], - 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], - 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) - -class Image(BaseImage): - def qvec2rotmat(self): - return qvec2rotmat(self.qvec) - -CAMERA_MODELS = { - CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), - CameraModel(model_id=1, model_name="PINHOLE", num_params=4), - CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), - CameraModel(model_id=3, model_name="RADIAL", num_params=5), - CameraModel(model_id=4, model_name="OPENCV", num_params=8), - CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), - CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), - CameraModel(model_id=7, model_name="FOV", num_params=5), - CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), - CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), - CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) -} -CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) - for camera_model in CAMERA_MODELS]) -CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) - for camera_model in CAMERA_MODELS]) - -def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): - """Read and unpack the next bytes from a binary file. - :param fid: - :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. - :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. - :param endian_character: Any of {@, =, <, >, !} - :return: Tuple of read and unpacked values. 
- """ - data = fid.read(num_bytes) - return struct.unpack(endian_character + format_char_sequence, data) - -def read_cameras_binary(path_to_model_file): - """ - Read colmap camera binary format - see: src/base/reconstruction.cc - void Reconstruction::WriteCamerasBinary(const std::string& path) - void Reconstruction::ReadCamerasBinary(const std::string& path) - """ - cameras = {} - with open(path_to_model_file, "rb") as fid: - num_cameras = read_next_bytes(fid, 8, "Q")[0] - for _ in range(num_cameras): - camera_properties = read_next_bytes( - fid, num_bytes=24, format_char_sequence="iiQQ") - camera_id = camera_properties[0] - model_id = camera_properties[1] - model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name - width = camera_properties[2] - height = camera_properties[3] - num_params = CAMERA_MODEL_IDS[model_id].num_params - params = read_next_bytes(fid, num_bytes=8*num_params, - format_char_sequence="d"*num_params) - cameras[camera_id] = Camera(id=camera_id, - model=model_name, - width=width, - height=height, - params=np.array(params)) - if len(cameras) != num_cameras: - raise RuntimeError("Cameras dont match num_camera") - return cameras - -def read_images_binary(path_to_model_file): - """ - see: src/base/reconstruction.cc - void Reconstruction::ReadImagesBinary(const std::string& path) - void Reconstruction::WriteImagesBinary(const std::string& path) - """ - images = {} - with open(path_to_model_file, "rb") as fid: - num_reg_images = read_next_bytes(fid, 8, "Q")[0] - for _ in range(num_reg_images): - binary_image_properties = read_next_bytes( - fid, num_bytes=64, format_char_sequence="idddddddi") - image_id = binary_image_properties[0] - qvec = np.array(binary_image_properties[1:5]) - tvec = np.array(binary_image_properties[5:8]) - camera_id = binary_image_properties[8] - image_name = "" - current_char = read_next_bytes(fid, 1, "c")[0] - while current_char != b"\x00": # look for the ASCII 0 entry - image_name += current_char.decode("utf-8") - current_char = read_next_bytes(fid, 1, "c")[0] - num_points2D = read_next_bytes(fid, num_bytes=8, - format_char_sequence="Q")[0] - x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D, - format_char_sequence="ddq"*num_points2D) - xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), - tuple(map(float, x_y_id_s[1::3]))]) - point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) - images[image_id] = Image( - id=image_id, qvec=qvec, tvec=tvec, - camera_id=camera_id, name=image_name, - xys=xys, point3D_ids=point3D_ids) - return images -# -def colmap2meshroom_instrinsics(colmap_intrinsics, sfm_data={}): - sfm_data = sfm_data.copy() - intrinsics = [] - for camera_id, colmap_camera in colmap_intrinsics.items(): - intrinsic={ - "intrinsicId": camera_id, - "width": colmap_camera.width, - "height": colmap_camera.height, - "sensorWidth": "1", - "sensorHeight": str(colmap_camera.height/colmap_camera.width), - "serialNumber": "-1", - "initializationMode": "unknown", - "initialFocalLength": "-1", - "pixelRatio": "1", - "pixelRatioLocked": "true", - "locked": "false" - } - if colmap_camera.model == "SIMPLE_PINHOLE": - intrinsic["type"]="pinhole" - intrinsic["focalLength"]=colmap_camera.params[0] - intrinsic["principalPoint"]=[colmap_camera.params[1], colmap_camera.params[2]] - if colmap_camera.model == "PINHOLE": - intrinsic["type"]="pinhole" - intrinsic["focalLength"]=[colmap_camera.params[0], colmap_camera.params[1]] - intrinsic["principalPoint"]=[colmap_camera.params[2], colmap_camera.params[3]] - elif colmap_camera.model == "SIMPLE_RADIAL" 
: - intrinsic["type"]="radial1" - intrinsic["focalLength"]=colmap_camera.params[0] - intrinsic["principalPoint"]=[colmap_camera.params[1], colmap_camera.params[2]] - intrinsic["distortionParams"]=[colmap_camera.params[3]] - else: - raise RuntimeError("Camera model not supported yet") # TODO: colmap_camera.model == "RADIAL" - - pixel_size = 1/colmap_camera.width - #converts the focal in "mm", assuming sensor width=1 - if isinstance(intrinsic["focalLength"], list) : - print("WARNING: anamorphic lenses not supported, will take mean of intrinsic") - intrinsic["focalLength"] = np.average(intrinsic["focalLength"]) - intrinsic["focalLength"]=pixel_size*intrinsic["focalLength"] - # intrinsic["focalLength"]=[pixel_size*x for x in intrinsic["focalLength"]] - #principal point as delta from center - intrinsic["principalPoint"]=[intrinsic["principalPoint"][0]-colmap_camera.width/2.0, - intrinsic["principalPoint"][1]-colmap_camera.height/2.0, - ] - - intrinsics.append(intrinsic) - sfm_data["intrinsics"] = intrinsics - return sfm_data - -def colmap2meshroom_extrinsics(colmap_extrinsics, colmap_intrinsics, image_folder="", sfm_data={}): - sfm_data = sfm_data.copy() - extrinsics = [] - views = [] - single_cam = False - if len(colmap_intrinsics)= 3: - break - byte = fid.read(1) - array = np.fromfile(fid, np.float32) - array = array.reshape((width, height, channels), order="F") - return np.transpose(array, (1, 0, 2)).squeeze() +from meshroom.core.plugin import PluginCommandLineNode, EnvType - -class ImportColmapDepthMaps(desc.Node): +class ImportColmapDepthMaps(PluginCommandLineNode): category = 'MRRS - Colmap' - documentation = '''''' + documentation = ''' ''' + + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "import_colmap_depth_maps.py")+'" {allParams}' + inputs = [ desc.File( - name="input", + name="inputFolder", label="Input", description="COLMAP Dense folder in workspace.", value="", @@ -79,79 +56,11 @@ class ImportColmapDepthMaps(desc.Node): description='Generated depth maps.', semantic='image', value=desc.Node.internalFolder + '_depthMap.exr', +<<<<<<< HEAD +======= + uid=[], + group="" +>>>>>>> port colmap ), ] - - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if chunk.node.input.value == '': - chunk.logger.warning( - 'No input workspace in node ImportColmapDepthMaps, skipping') - return False - return True - - def processChunk(self, chunk): - """ - Import depth maps from COLMAP. 
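
For context on the .photometric.bin files handled by this node: COLMAP prefixes the raw float32 payload with a short ASCII header of the form "width&height&channels&" and stores the values column-major. Only the tail of the reader is visible in these hunks, so here is a rough sketch of what the upstream COLMAP helper does (it matches the reshape/transpose kept in this repo, but treat it as a reference sketch rather than this patch's code):

    import numpy as np

    def read_array(path):
        # parse the ASCII "width&height&channels&" prefix, then read float32 data
        with open(path, "rb") as fid:
            width, height, channels = np.genfromtxt(
                fid, delimiter="&", max_rows=1, usecols=(0, 1, 2), dtype=int)
            fid.seek(0)
            num_delim = 0
            byte = fid.read(1)
            while True:
                if byte == b"&":
                    num_delim += 1
                    if num_delim >= 3:
                        break
                byte = fid.read(1)
            array = np.fromfile(fid, np.float32)
        array = array.reshape((width, height, channels), order="F")
        return np.transpose(array, (1, 0, 2)).squeeze()
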
- """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - if not self.check_inputs(chunk): - return - - depth_map_folder = os.path.join(chunk.node.input.value,'stereo','depth_maps') - normal_map_folder = os.path.join(chunk.node.input.value,'stereo','normal_maps') - - depth_map_paths = [f for f in glob.glob(os.path.join(depth_map_folder,"*.*.photometric.bin"))] - normal_map_paths = [f for f in glob.glob(os.path.join(normal_map_folder,"*.*.photometric.bin"))] - - view_uid_map = {} - if chunk.node.inputSfm.value != '': - sfm_data = json.load(open(chunk.node.inputSfm.value, 'r')) - #map view path => uid - for view in sfm_data['views']: - view_basename = os.path.basename(view['path']).split(".")[0] - view_uid_map[view_basename] =view['viewId'] - extrinsics, intrinsics, _, _, _, pixel_sizes_all_cams=matrices_from_sfm_data(sfm_data) - - for index, (depth_map_path, normal_map_path) in enumerate(zip(depth_map_paths, normal_map_paths)): - - depth_map = read_array(depth_map_path) - normal_map = read_array(normal_map_path) - - min_depth, max_depth = np.percentile( - depth_map, [1, 99]) - depth_map[depth_map < min_depth] = min_depth - depth_map[depth_map > max_depth] = max_depth - - depth_map_name = "%d_depthMap.exr"%index - #if a sfmdata has been passed, matches the uid - if chunk.node.inputSfm.value != '': - depth_map_basename=os.path.basename(depth_map_path).split(".")[0] - if depth_map_basename in view_uid_map.keys(): - depth_map_name = view_uid_map[depth_map_basename]+"_depthMap.exr" - else: - chunk.logger.warning('Warning depth map for view '+depth_map_path+' not found in sfm data') - - #also resize to sfm data size if any - size=(int(sfm_data['views'][index]["width"]),int(sfm_data['views'][index]["height"])) - depth_map, _ =cv2_resize_with_pad(depth_map, size, padding_color=0) - - # #add metadata as well (used for display) - # # camera_center = extrinsics[index][0:3, 3] - # # inverse_intr_rot = np.linalg.inv( - # # intrinsics[index] @ np.linalg.inv(extrinsics[index][0:3, 0:3])) - # camera_center = np.linalg.inv(np.concatenate([extrinsics[index], [[0,0,0,1]]]))[0:3, 3] - # depth_meta = { - # "AliceVision:CArr": camera_center, - # "AliceVision:iCamArr": extrinsics[index][0:3, 0:3],#inverse_intr_rot, - # "AliceVision:downscale": 1 - # } FIXME: moved to depthmap transform? 
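
The FIXME above is answered later in this same patch: the viewer metadata is re-added in mrrs/utils/DepthMapTransform.py. A small self-contained sketch of the header that code writes (dummy pose and intrinsics, mirroring the logic further down rather than copying it):

    import numpy as np

    extrinsic = np.eye(4)                       # hypothetical 4x4 camera-to-world pose
    intrinsic = np.diag([1000., 1000., 1.])     # hypothetical 3x3 K matrix
    camera_center = extrinsic[0:3, 3].tolist()
    inverse_intr_rot = np.linalg.inv(intrinsic @ np.linalg.inv(extrinsic[0:3, 0:3]))
    depth_map_header = {
        "AliceVision:CArr": camera_center,        # camera centre, used by the Meshroom viewer
        "AliceVision:iCamArr": inverse_intr_rot,  # inverse of K @ R^-1
        "AliceVision:downscale": 1,
    }
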
- save_exr(depth_map,os.path.join(chunk.node.depthMapFolder.value, depth_map_name),'depth')#, custom_header=depth_meta) - - chunk.logger.info('Import done.') - finally: - chunk.logManager.end() diff --git a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py index 8dfa773..7f21e3c 100644 --- a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py +++ b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py @@ -2,21 +2,22 @@ import os import json import shutil -from meshroom.core import desc -from mrrs.core.ios import open_image, save_image -from mrrs.core.utils import cv2_resize_with_pad +from meshroom.core import desc +from meshroom.core.plugin import PluginCommandLineNode, EnvType -class Meshroom2ColmapSfmConvertions(desc.CommandLineNode): - commandLine = 'aliceVision_exportColmap -i {preparedSfmValue} -o {outputValue} ' - size = desc.DynamicNodeSize('input') +class Meshroom2ColmapSfmConvertions(PluginCommandLineNode): + commandLine = 'aliceVision_exportColmap -i {preparedSfmValue} -o {outputValue}' category = 'MRRS - Colmap' documentation = ''' ''' + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + inputs = [ desc.File( - name='input', + name='inputSfm', label='Input', description='SfMData file.', value='', @@ -74,9 +75,11 @@ class Meshroom2ColmapSfmConvertions(desc.CommandLineNode): def processChunk(self, chunk): + from mrrs.core.ios import open_image, save_image + from mrrs.core.utils import cv2_resize_with_pad # get image info - sfm_data = json.load(open(chunk.node.input.value)) + sfm_data = json.load(open(chunk.node.inputSfm.value)) views = sfm_data["views"] images_path = [v["path"] for v in views] image_sizes = [[int(v["width"]), int(v["height"])] for v in views] @@ -88,7 +91,7 @@ def processChunk(self, chunk): new_images_path = [os.path.join( images_output_folder, basename) for basename in images_basename] #get if we must resize - do_resize = chunk.node.maxImageSize.value == 0 or image_sizes[0][0]>chunk.node.maxImageSize.value + do_resize = (chunk.node.maxImageSize.value == 0) and (image_sizes[0][0]>chunk.node.maxImageSize.value) #modify .sfm with new sizes and filepath if do_resize: @@ -108,7 +111,8 @@ def processChunk(self, chunk): with open(os.path.join(chunk.node.preparedSfm.value), 'w') as f: json.dump(sfm_data, f, indent=4) else: #or ceate symlink stright to the sfm - os.symlink(chunk.node.input.value, chunk.node.preparedSfm.value) + os.symlink(chunk.node.inputSfm.value, chunk.node.preparedSfm.value) + #run the cl desc.CommandLineNode.processChunk(self, chunk) diff --git a/mrrs/colmap/PatchMatchStereo.py b/mrrs/colmap/PatchMatchStereo.py index 1bd30b3..6226a1b 100644 --- a/mrrs/colmap/PatchMatchStereo.py +++ b/mrrs/colmap/PatchMatchStereo.py @@ -6,7 +6,7 @@ from meshroom.core import desc from . import COLMAP -class PatchMatchStereo(desc.CommandLineNode): +class ColmapPatchMatchStereo(desc.CommandLineNode): commandLine = COLMAP+' patch_match_stereo {allParams} --PatchMatchStereo.gpu_index 0' gpu = desc.Level.INTENSIVE category = 'MRRS - Colmap' diff --git a/mrrs/colmap/PoissonMesher.py b/mrrs/colmap/PoissonMesher.py index abdfa6b..cacc167 100644 --- a/mrrs/colmap/PoissonMesher.py +++ b/mrrs/colmap/PoissonMesher.py @@ -2,15 +2,17 @@ import os from meshroom.core import desc - -import trimesh +from meshroom.core.plugin import PluginCommandLineNode, EnvType from . 
import COLMAP -from mrrs.core.geometry import CG_CV_MAT44 -class PoissonMesher(desc.CommandLineNode): +class ColmapPoissonMesher(PluginCommandLineNode): commandLine = COLMAP+' poisson_mesher {input_path} --PoissonMeshing.trim {trimValue} --output_path {output_meshValue}' + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + + category = 'MRRS - Colmap' documentation = '''''' @@ -42,6 +44,9 @@ class PoissonMesher(desc.CommandLineNode): def processChunk(self, chunk): desc.CommandLineNode.processChunk(self, chunk) + import trimesh + from mrrs.core.geometry import CG_CV_MAT44 + #! env #re-orient mesh mesh = trimesh.load(chunk.node.output_mesh.value) mesh.apply_transform(CG_CV_MAT44) diff --git a/mrrs/colmap/StereoFusion.py b/mrrs/colmap/StereoFusion.py index c2dabc0..fb42dfe 100644 --- a/mrrs/colmap/StereoFusion.py +++ b/mrrs/colmap/StereoFusion.py @@ -6,7 +6,7 @@ from meshroom.core import desc from . import COLMAP -class StereoFusion(desc.CommandLineNode): +class ColmapStereoFusion(desc.CommandLineNode): commandLine = COLMAP+' stereo_fusion {allParams}' category = 'MRRS - Colmap' diff --git a/mrrs/colmap/colmap_2_meshroom_sfm_convertion.py b/mrrs/colmap/colmap_2_meshroom_sfm_convertion.py new file mode 100644 index 0000000..0133bbd --- /dev/null +++ b/mrrs/colmap/colmap_2_meshroom_sfm_convertion.py @@ -0,0 +1,277 @@ +import json +import argparse +import os +import collections +import struct + +import numpy as np + +#from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py +CameraModel = collections.namedtuple( + "CameraModel", ["model_id", "model_name", "num_params"]) +Camera = collections.namedtuple( + "Camera", ["id", "model", "width", "height", "params"]) +BaseImage = collections.namedtuple( + "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]) + +def qvec2rotmat(qvec): + return np.array([ + [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, + 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], + 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], + [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], + 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, + 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], + [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], + 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], + 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]]) + +class Image(BaseImage): + def qvec2rotmat(self): + return qvec2rotmat(self.qvec) + +CAMERA_MODELS = { + CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3), + CameraModel(model_id=1, model_name="PINHOLE", num_params=4), + CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4), + CameraModel(model_id=3, model_name="RADIAL", num_params=5), + CameraModel(model_id=4, model_name="OPENCV", num_params=8), + CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8), + CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12), + CameraModel(model_id=7, model_name="FOV", num_params=5), + CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4), + CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5), + CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12) +} +CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model) + for camera_model in CAMERA_MODELS]) +CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model) + for camera_model in CAMERA_MODELS]) + +def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"): + """Read and unpack the next bytes from a binary file. 
+ :param fid: + :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc. + :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}. + :param endian_character: Any of {@, =, <, >, !} + :return: Tuple of read and unpacked values. + """ + data = fid.read(num_bytes) + return struct.unpack(endian_character + format_char_sequence, data) + +def read_cameras_binary(path_to_model_file): + """ + Read colmap camera binary format + see: src/base/reconstruction.cc + void Reconstruction::WriteCamerasBinary(const std::string& path) + void Reconstruction::ReadCamerasBinary(const std::string& path) + """ + cameras = {} + with open(path_to_model_file, "rb") as fid: + num_cameras = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_cameras): + camera_properties = read_next_bytes( + fid, num_bytes=24, format_char_sequence="iiQQ") + camera_id = camera_properties[0] + model_id = camera_properties[1] + model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name + width = camera_properties[2] + height = camera_properties[3] + num_params = CAMERA_MODEL_IDS[model_id].num_params + params = read_next_bytes(fid, num_bytes=8*num_params, + format_char_sequence="d"*num_params) + cameras[camera_id] = Camera(id=camera_id, + model=model_name, + width=width, + height=height, + params=np.array(params)) + if len(cameras) != num_cameras: + raise RuntimeError("Cameras dont match num_camera") + return cameras + +def read_images_binary(path_to_model_file): + """ + see: src/base/reconstruction.cc + void Reconstruction::ReadImagesBinary(const std::string& path) + void Reconstruction::WriteImagesBinary(const std::string& path) + """ + images = {} + with open(path_to_model_file, "rb") as fid: + num_reg_images = read_next_bytes(fid, 8, "Q")[0] + for _ in range(num_reg_images): + binary_image_properties = read_next_bytes( + fid, num_bytes=64, format_char_sequence="idddddddi") + image_id = binary_image_properties[0] + qvec = np.array(binary_image_properties[1:5]) + tvec = np.array(binary_image_properties[5:8]) + camera_id = binary_image_properties[8] + image_name = "" + current_char = read_next_bytes(fid, 1, "c")[0] + while current_char != b"\x00": # look for the ASCII 0 entry + image_name += current_char.decode("utf-8") + current_char = read_next_bytes(fid, 1, "c")[0] + num_points2D = read_next_bytes(fid, num_bytes=8, + format_char_sequence="Q")[0] + x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D, + format_char_sequence="ddq"*num_points2D) + xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), + tuple(map(float, x_y_id_s[1::3]))]) + point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3]))) + images[image_id] = Image( + id=image_id, qvec=qvec, tvec=tvec, + camera_id=camera_id, name=image_name, + xys=xys, point3D_ids=point3D_ids) + return images +# +def colmap2meshroom_instrinsics(colmap_intrinsics, sfm_data={}): + sfm_data = sfm_data.copy() + intrinsics = [] + for camera_id, colmap_camera in colmap_intrinsics.items(): + intrinsic={ + "intrinsicId": camera_id, + "width": colmap_camera.width, + "height": colmap_camera.height, + "sensorWidth": "1", + "sensorHeight": str(colmap_camera.height/colmap_camera.width), + "serialNumber": "-1", + "initializationMode": "unknown", + "initialFocalLength": "-1", + "pixelRatio": "1", + "pixelRatioLocked": "true", + "locked": "false" + } + if colmap_camera.model == "SIMPLE_PINHOLE": + intrinsic["type"]="pinhole" + intrinsic["focalLength"]=colmap_camera.params[0] + intrinsic["principalPoint"]=[colmap_camera.params[1], colmap_camera.params[2]] + if 
colmap_camera.model == "PINHOLE": + intrinsic["type"]="pinhole" + intrinsic["focalLength"]=[colmap_camera.params[0], colmap_camera.params[1]] + intrinsic["principalPoint"]=[colmap_camera.params[2], colmap_camera.params[3]] + elif colmap_camera.model == "SIMPLE_RADIAL" : + intrinsic["type"]="radial1" + intrinsic["focalLength"]=colmap_camera.params[0] + intrinsic["principalPoint"]=[colmap_camera.params[1], colmap_camera.params[2]] + intrinsic["distortionParams"]=[colmap_camera.params[3]] + else: + raise RuntimeError("Camera model not supported yet") # TODO: colmap_camera.model == "RADIAL" + + pixel_size = 1/colmap_camera.width + #converts the focal in "mm", assuming sensor width=1 + if isinstance(intrinsic["focalLength"], list) : + print("WARNING: anamorphic lenses not supported, will take mean of intrinsic") + intrinsic["focalLength"] = np.average(intrinsic["focalLength"]) + intrinsic["focalLength"]=pixel_size*intrinsic["focalLength"] + # intrinsic["focalLength"]=[pixel_size*x for x in intrinsic["focalLength"]] + #principal point as delta from center + intrinsic["principalPoint"]=[intrinsic["principalPoint"][0]-colmap_camera.width/2.0, + intrinsic["principalPoint"][1]-colmap_camera.height/2.0, + ] + + intrinsics.append(intrinsic) + sfm_data["intrinsics"] = intrinsics + return sfm_data + +def colmap2meshroom_extrinsics(colmap_extrinsics, colmap_intrinsics, image_folder="", sfm_data={}): + sfm_data = sfm_data.copy() + extrinsics = [] + views = [] + single_cam = False + if len(colmap_intrinsics)= 3: + break + byte = fid.read(1) + array = np.fromfile(fid, np.float32) + array = array.reshape((width, height, channels), order="F") + return np.transpose(array, (1, 0, 2)).squeeze() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + '-i', '--inputFolder', + help="", + ) + parser.add_argument( + '-j', '--inputSfm', + help="", + ) + parser.add_argument( + '-o', '--depthMapFolder', + help="", + ) + + args = parser.parse_args() + + + input_folder = args.inputFolder + input_sfm = args.inputSfm + depth_maps_folder = args.depthMapFolder + + depth_map_folder = os.path.join(input_folder,'stereo','depth_maps') + normal_map_folder = os.path.join(input_folder,'stereo','normal_maps') + + depth_map_paths = [f for f in glob.glob(os.path.join(depth_map_folder,"*.*.photometric.bin"))] + normal_map_paths = [f for f in glob.glob(os.path.join(normal_map_folder,"*.*.photometric.bin"))] + + view_uid_map = {} + if input_sfm != '': + sfm_data = json.load(open(input_sfm, 'r')) + #map view path => uid + for view in sfm_data['views']: + view_basename = os.path.basename(view['path']).split(".")[0] + view_uid_map[view_basename] =view['viewId'] + extrinsics, intrinsics, _, _, _, pixel_sizes_all_cams=matrices_from_sfm_data(sfm_data) + + for index, (depth_map_path, normal_map_path) in enumerate(zip(depth_map_paths, normal_map_paths)): + + depth_map = read_array(depth_map_path) + normal_map = read_array(normal_map_path) + + min_depth, max_depth = np.percentile( + depth_map, [1, 99]) + depth_map[depth_map < min_depth] = min_depth + depth_map[depth_map > max_depth] = max_depth + + depth_map_name = "%d_depthMap.exr"%index + #if a sfmdata has been passed, matches the uid + if input_sfm != '': + depth_map_basename=os.path.basename(depth_map_path).split(".")[0] + if depth_map_basename in view_uid_map.keys(): + depth_map_name = view_uid_map[depth_map_basename]+"_depthMap.exr" + else: + print('Warning depth map for view '+depth_map_path+' not found in sfm data') + else: + print('Warning depth map for 
view '+depth_map_path+' not found in sfm data') + + #also resize to sfm data size if any + size=(int(sfm_data['views'][index]["width"]),int(sfm_data['views'][index]["height"])) + depth_map, _ =cv2_resize_with_pad(depth_map, size, padding_color=0) + + save_exr(depth_map,os.path.join(depth_maps_folder, depth_map_name)) + diff --git a/mrrs/deep_depth_map/VizMVSNet.py b/mrrs/deep_depth_map/VizMVSNet.py index c175555..eea64b4 100644 --- a/mrrs/deep_depth_map/VizMVSNet.py +++ b/mrrs/deep_depth_map/VizMVSNet.py @@ -4,7 +4,7 @@ import cv2 from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode from mrrs.core.ios import matrices_from_sfm_data, open_depth_map, save_exr from mrrs.core.utils import format_float_array @@ -12,31 +12,9 @@ EXEC = "python "+ os.path.join(os.path.dirname(__file__), "Vis-MVSNet/test.py") MODEL_PATH = os.path.join(os.path.dirname(__file__), "Vis-MVSNet/pretrained_model/vis") -# parser.add_argument('--data_root', type=str, help='The root dir of the data.') -# parser.add_argument('--dataset_name', type=str, default='tanksandtemples', help='The name of the dataset. Should be identical to the dataloader source file. e.g. blended refers to data/blended.py.') -# parser.add_argument('--model_name', type=str, default='model_cas', help='The name of the model. Should be identical to the model source file. e.g. model_cas refers to core/model_cas.py.') +class VizMVSNet(PluginNode): -# parser.add_argument('--num_src', type=int, default=7, help='The number of source views.') -# parser.add_argument('--max_d', type=int, default=256, help='The standard max depth number.') -# parser.add_argument('--interval_scale', type=float, default=1., help='The standard interval scale.') -# parser.add_argument('--cas_depth_num', type=str, default='64,32,16', help='The depth number for each stage.') -# parser.add_argument('--cas_interv_scale', type=str, default='4,2,1', help='The interval scale for each stage.') -# parser.add_argument('--resize', type=str, default='1920,1080', help='The size of the preprocessed input resized from the original one.') -# parser.add_argument('--crop', type=str, default='1920,1056', help='The size of the preprocessed input cropped from the resized one.') - -# parser.add_argument('--mode', type=str, default='soft', choices=['soft', 'hard', 'uwta', 'maxpool', 'average'], help='The fusion strategy.') -# parser.add_argument('--occ_guide', action='store_true', default=False, help='Deprecated') - -# parser.add_argument('--load_path', type=str, default=None, help='The dir of the folder containing the pretrained checkpoints.') -# parser.add_argument('--load_step', type=int, default=-1, help='The step to load. 
-1 for the latest one.') - -# parser.add_argument('--show_result', action='store_true', default=False, help='Set to show the results.') -# parser.add_argument('--write_result', action='store_true', default=False, help='Set to save the results.') -# parser.add_argument('--result_dir', type=str, help='The dir to save the results.') - -class VizMVSNet(CondaNode): - - category = 'VizMVSNet' + category = 'MRRS - Deep depth' documentation = ''' ''' gpu = desc.Level.INTENSIVE diff --git a/mrrs/deep_feature_matching/DeepFeatureExtraction.py b/mrrs/deep_feature_matching/DeepFeatureExtraction.py index 2466279..46fc462 100644 --- a/mrrs/deep_feature_matching/DeepFeatureExtraction.py +++ b/mrrs/deep_feature_matching/DeepFeatureExtraction.py @@ -3,12 +3,12 @@ from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/deep_feature_extraction.py") -class DeepFeatureExtraction(CondaNode): +class DeepFeatureExtraction(PluginNode): category = 'Sparse Reconstruction' documentation = ''' ''' diff --git a/mrrs/deep_feature_matching/LightGlueMatcher.py b/mrrs/deep_feature_matching/LightGlueMatcher.py index 9066811..6008590 100644 --- a/mrrs/deep_feature_matching/LightGlueMatcher.py +++ b/mrrs/deep_feature_matching/LightGlueMatcher.py @@ -3,11 +3,11 @@ from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/light_glue_matcher.py") -class LightGlueMatching(CondaNode): +class LightGlueMatching(PluginNode): category = 'Sparse Reconstruction' documentation = ''' ''' diff --git a/mrrs/deep_feature_matching/LoftrMatcher.py b/mrrs/deep_feature_matching/LoftrMatcher.py index 1969ab9..22d9c7a 100644 --- a/mrrs/deep_feature_matching/LoftrMatcher.py +++ b/mrrs/deep_feature_matching/LoftrMatcher.py @@ -3,11 +3,11 @@ from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode LOFTR_EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/loftr_matcher.py") -class LoftrMatcher(CondaNode): +class LoftrMatcher(PluginNode): category = 'Sparse Reconstruction' documentation = ''' ''' diff --git a/mrrs/gaussian_splatting/ios.py b/mrrs/gaussian_splatting/ios.py new file mode 100644 index 0000000..1ec64bc --- /dev/null +++ b/mrrs/gaussian_splatting/ios.py @@ -0,0 +1,64 @@ +import numpy as np +from trimesh.exchange.ply import _parse_header, _ply_binary +import numpy as np + +def sigmoid(x): + return 1 / (1 + np.exp(-x)) + +def load_gs_ply(path, max_sh_degree=3): + """ + (modified from original repo) + """ + with open(path, 'rb') as f: + elements, _, _ = _parse_header(f) + _ply_binary(elements, f) + + xyz = np.stack((np.asarray(elements['vertex']['data']["x"]), + np.asarray(elements['vertex']['data']["y"]), + np.asarray(elements['vertex']['data']["z"])), axis=1) + opacities = np.asarray(elements['vertex']['data']["opacity"])[..., np.newaxis] + #aaply activation + opacities = sigmoid(opacities) + + features_dc = np.zeros((xyz.shape[0], 3, 1)) + features_dc[:, 0, 0] = np.asarray(elements['vertex']['data']["f_dc_0"]) + features_dc[:, 1, 0] = np.asarray(elements['vertex']['data']["f_dc_1"]) + features_dc[:, 2, 0] = np.asarray(elements['vertex']['data']["f_dc_2"]) + + extra_f_names = [p for p in elements['vertex']['properties'] if p.startswith("f_rest_")] + extra_f_names = sorted(extra_f_names, 
key = lambda x: int(x.split('_')[-1])) + assert len(extra_f_names)==3*(max_sh_degree + 1) ** 2 - 3 + features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) + for idx, attr_name in enumerate(extra_f_names): + features_extra[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) + features_extra = features_extra.reshape((features_extra.shape[0], 3, (max_sh_degree + 1) ** 2 - 1)) + + scale_names = [p for p in elements['vertex']['properties']if p.startswith("scale_")] + scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) + scales = np.zeros((xyz.shape[0], len(scale_names))) + for idx, attr_name in enumerate(scale_names): + scales[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + #scaling activation + scales=np.exp(scales) + + + + rot_names = [p for p in elements['vertex']['properties']if p.startswith("rot")] + rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) + rots = np.zeros((xyz.shape[0], len(rot_names))) + for idx, attr_name in enumerate(rot_names): + rots[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + + + return xyz, rots, scales, \ + opacities, features_dc, features_extra + +def rgb_from_sh(sh_0): + """ + Get RGB values for sh coef 0 (solid color) + """ + C0 = 0.28209479177387814 + result = C0 * sh_0 + 0.5 + result = np.clip(result, 0, 1) + return result diff --git a/mrrs/nerf/nerfstudio.py b/mrrs/nerf/nerfstudio.py index aad546d..1c26740 100644 --- a/mrrs/nerf/nerfstudio.py +++ b/mrrs/nerf/nerfstudio.py @@ -4,7 +4,7 @@ import shutil from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode from mrrs.nerf import ENV_FILE def convert_sfmdata_to_nerf(sfm_data, actual_path): @@ -114,7 +114,7 @@ def copy_recursive_walk(root_path, path): for f in files: shutil.move(os.path.join(root, f), root_path) -class NeRFStudio(CondaNode): +class NeRFStudio(PluginNode): category = 'Meshroom Research' documentation = ''' ''' diff --git a/mrrs/nerf/nerfstudio_export.py b/mrrs/nerf/nerfstudio_export.py index ab06d62..b0f924b 100644 --- a/mrrs/nerf/nerfstudio_export.py +++ b/mrrs/nerf/nerfstudio_export.py @@ -2,10 +2,10 @@ import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode from mrrs.nerf import ENV_FILE -class NeRFStudioExport(CondaNode): +class NeRFStudioExport(PluginNode): category = 'Meshroom Research' documentation = ''' ''' diff --git a/mrrs/reality_capture/ExportXMP.py b/mrrs/reality_capture/ExportXMP.py index 9c5e1ef..7f7344a 100644 --- a/mrrs/reality_capture/ExportXMP.py +++ b/mrrs/reality_capture/ExportXMP.py @@ -6,9 +6,9 @@ import os from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode -class ExportXMP(CondaNode): +class ExportXMP(PluginNode): category = 'MRRS - Reality Capture' diff --git a/mrrs/reality_capture/ImportXMP.py b/mrrs/reality_capture/ImportXMP.py index 7f18438..4617012 100644 --- a/mrrs/reality_capture/ImportXMP.py +++ b/mrrs/reality_capture/ImportXMP.py @@ -4,11 +4,11 @@ __version__ = "3.0" from meshroom.core import desc -from meshroom.core.plugin import CondaNode +from meshroom.core.plugin import PluginNode import os -class ImportXMP(CondaNode): +class ImportXMP(PluginNode): category = 'MRRS - Reality Capture' diff --git a/mrrs/utils/ComputeNormals.py b/mrrs/utils/ComputeNormals.py deleted file mode 100644 index 17b1f3b..0000000 --- 
a/mrrs/utils/ComputeNormals.py +++ /dev/null @@ -1,107 +0,0 @@ -__version__ = "3.0" - -import json -import os - -import numpy as np - -from meshroom.core import desc - -from mrrs.core.ios import matrices_from_sfm_data, open_exr, open_image, save_exr -from mrrs.core.geometry import compute_normals, make_homogeneous - -class ComputeNormals(desc.Node): - """ - Class that compute normal maps from a depth map folder - """ - # gpu = desc.Level.HIGH - - category = 'Meshroom Research'#'Dense Reconstruction' - documentation = '''Compute normal maps from a depth map folder''' - - inputs = [ - desc.File( - name='inputSfmData', - label='SfMData', - description='SfMData file.', - value='', - ), - desc.File( - name="depthMapsFolder", - label="DepthMaps Folder", - description="Input depth maps folder.", - value="", - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='''Verbosity level (fatal, error, warning, info, debug, trace).''', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - ), - ] - - outputs = [ - desc.File( - name='outputNormalFolder', - label='Output normal Folder', - description='Output folder for refined depth maps.', - value=desc.Node.internalFolder, - ), - desc.File( - name='normals', - label='Normal maps', - description='Generated depth maps.', - semantic='image', - value=desc.Node.internalFolder + '.exr', - group='', # do not export on the command line - ), - ] - - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if not chunk.node.inputSfmData.value: - chunk.logger.warning("No input inputSfmData in node DeepDepthMapRefinement, skipping") - return False - if not chunk.node.depthMapsFolder.value: - chunk.logger.warning("No input depthMapsFolder in node DeepDepthMapRefinement, skipping") - return False - return True - - - def processChunk(self, chunk): - """ - Opens the dataset data. 
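
ComputeNormals (a thin wrapper around mrrs.core.geometry.compute_normals) is dropped here without a replacement. For context only, a common way to derive per-pixel normals from a depth map is to cross the image-space gradients of the back-projected points; a small sketch of that idea, not the deleted helper itself:

    import numpy as np

    def normals_from_points(points):
        """points: HxWx3 array of back-projected 3D points (one per pixel)."""
        dx = np.gradient(points, axis=1)    # derivative along image columns
        dy = np.gradient(points, axis=0)    # derivative along image rows
        normals = np.cross(dx, dy)
        norm = np.linalg.norm(normals, axis=-1, keepdims=True)
        return normals / np.maximum(norm, 1e-8)
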
- """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - # check inputs - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts computing normals") - #open sfm - with open(chunk.node.inputSfmData.value, "r") as json_file: - sfm_data = json.load(json_file) - - # extrinsics_all_cams, intrinsics_all_cams, views_id, poses_id, intrinsics_id, pixel_sizes_all_cams = matrices_from_sfm_data(sfm_data) - - #run normlal conv - for view in sfm_data["views"]: - view_id = view["viewId"] - # extrinsics = extrinsics_all_cams[np.where(poses_id==view["poseId"])[0][0]] - depth_map_path = os.path.join(chunk.node.depthMapsFolder.value,view_id+"_depthMap.exr" )#FIXME: hardcoded - depth_map, depth_map_header = open_exr(depth_map_path) - normals = compute_normals(depth_map) - # normals_world_cs = (extrinsics[0:3,0:3]@normals.reshape(-1,3).T).T - # normals_world_cs=normals_world_cs.reshape(normals.shape) - # write noramls - normals_path = os.path.join(chunk.node.outputNormalFolder.value,view_id + ".exr" )#FIXME: hardcoded - save_exr(normals, normals_path, data_type="RGB") - - chunk.logger.info("Done computing normals") - finally: - chunk.logManager.end() - diff --git a/mrrs/utils/DepthMapTransform.py b/mrrs/utils/DepthMapTransform.py index d2bdbac..6063905 100644 --- a/mrrs/utils/DepthMapTransform.py +++ b/mrrs/utils/DepthMapTransform.py @@ -1,113 +1,19 @@ __version__ = "3.0" +import logging import os import json -import cv2 - from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType -from mrrs.core.ios import * -from mrrs.core.geometry import * -from mrrs.core.utils import listdir_fullpath - -def meshroom2normal(pixels, depth_map, extrinsic, intrinsic, pixel_size): - """ - Convert meshroom depth maps to conventional depth maps. - """ - if extrinsic is None: - raise ValueError("Must pass an sfm for this transform") - #deproject using meshoom deprojection - scene_points = camera_deprojection_meshroom(pixels, depth_map, extrinsic, intrinsic, pixel_size) - #reprojection used to get depth map - _, points_distances_from_camera = camera_projection(scene_points, extrinsic, intrinsic, pixel_size) - depth_map_converted = np.reshape(points_distances_from_camera, depth_map.shape) - return depth_map_converted - -def normal2meshroom(pixels, depth_map, extrinsic, intrinsic, pixel_size): - """ - Convert conventional depth maps to meshroom depth maps. - """ - if extrinsic is None: - raise ValueError("Must pass an sfm for this transform") - #deproject using regular equation - scene_points = camera_deprojection(pixels, depth_map, extrinsic, intrinsic, pixel_size) - #Z is distance from camera center - points_distances_from_camera = np.sqrt(np.sum((scene_points-extrinsic[0:3,3])**2, axis=-1)) - depth_map_converted = np.reshape(points_distances_from_camera, depth_map.shape) - depth_map_converted[depth_map<0]=0 - return depth_map_converted - -def id(pixels, depth_map, extrinsic, intrinsic, pixel_size): - return depth_map - -def do_transform(depth_maps_path, sfm_data, transform, output_folder): - """ - Runs the transform on a set of depth maps. 
- """ - output_depth_map_paths = [] - if sfm_data is not None: - extrinsics, intrinsics, _, _, _, pixel_sizes = matrices_from_sfm_data(sfm_data) - pixels = None - for index, view in enumerate(sfm_data["views"]): - logging.info("Converting view %d/%d"%(index, len(pixel_sizes))) - view_id = view["viewId"] - if not os.path.exists(depth_maps_path[index]): - logging.warning(depth_maps_path[index]+" cannot be found, skipping") - continue - depth_map, depth_map_header = open_exr(depth_maps_path[index]) - depth_map=depth_map.astype(np.float32) - - depth_map_size = np.asarray(depth_map.shape[0:2]) - #add downscale if not present - if "AliceVision:downscale" not in depth_map_header: - depth_map_header["AliceVision:downscale"]=float(view["width"])/float(depth_map_size[1]) - - #FIXME: resizing is not ideal, but convenient to use our calib directly - scale = float(depth_map_header["AliceVision:downscale"]) - depth_map = cv2.resize(depth_map, (scale*depth_map_size[::-1]).astype(np.int32)) - logging.info("Rescaling depth map with %f"%scale) - - ys, xs = np.meshgrid(range(0, depth_map.shape[0]), \ - range(0, depth_map.shape[1]), \ - indexing="ij") - pixels = [xs, ys] - depth_map_transformed = transform(pixels, depth_map, extrinsics[index], intrinsics[index], pixel_sizes[index]) - output_depth_map_path = os.path.join(output_folder, view_id+"_depthMap.exr") - depth_map_transformed[depth_map<0] = 0#put 0 in places where its invalid - - #resie to orginnal size - depth_map_transformed = cv2.resize(depth_map_transformed, depth_map_size[::-1]) - - # add header for vizualisation - if "AliceVision:CArr" not in depth_map_header: - # edit intrinsics pp with scale - intrinsics_dm = intrinsics[index] - # ? - # intrinsics_dm[0,2]/=depth_map_header["AliceVision:downscale"] - # intrinsics_dm[1,2]/=depth_map_header["AliceVision:downscale"] - camera_center = extrinsics[index][0:3, 3].tolist() - inverse_intr_rot = np.linalg.inv(intrinsics_dm @ np.linalg.inv(extrinsics[index][0:3, 0:3])) - - depth_map_header["AliceVision:CArr"] = camera_center - depth_map_header["AliceVision:iCamArr"]= inverse_intr_rot - - save_exr(depth_map_transformed, output_depth_map_path, custom_header=depth_map_header) - output_depth_map_paths.append(output_depth_map_path) - else: - for depth_map_file in depth_maps_path: - depth_map = open_depth_map(depth_map_file) - depth_map_transformed = transform(None, depth_map, None, None, None) - output_depth_map_path = os.path.join(output_folder, os.path.basename(depth_map_file)+"_depthMap.exr") - save_exr(depth_map_transformed, output_depth_map_path) - output_depth_map_paths.append(output_depth_map_path) - return output_depth_map_paths - -class DepthMapTransform(desc.Node): - - category = 'Meshroom Research' - documentation = '''Will process depth maps (groud truth and/or from folder, according to the selected transformation) -''' +class DepthMapTransform(PluginNode): + + category = 'MRRS - Utils' + documentation = '''Will process depth maps (groud truth and/or from folder, according to the selected transformation)''' + + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") inputs = [ desc.File( @@ -166,6 +72,105 @@ def processChunk(self, chunk): """ Computes the different transforms on the depth maps. 
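
About the two conventions handled by the helpers defined just below: as normal2meshroom makes explicit, the "meshroom" depth is the Euclidean distance from the camera centre along the pixel ray, while the "normal" convention is the usual z-depth. For a pinhole camera the two differ by a per-pixel factor; a small sketch of that relation (assuming a 3x3 K in pixels, this is not the node's own deprojection code):

    import numpy as np

    def z_to_distance(z, x, y, K):
        """Convert a conventional z-depth at pixel (x, y) into a distance
        from the camera centre, for a pinhole intrinsic matrix K."""
        ray = np.linalg.inv(K) @ np.array([x, y, 1.0])   # ray with unit z-component
        return z * np.linalg.norm(ray)

    # the reverse direction is z = d / np.linalg.norm(ray)
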
""" + import numpy as np + import cv2 + + from mrrs.core.utils import listdir_fullpath + from mrrs.core.geometry import camera_deprojection, camera_deprojection_meshroom, camera_projection + from mrrs.core.ios import matrices_from_sfm_data, open_depth_map, open_exr, save_exr + + def meshroom2normal(pixels, depth_map, extrinsic, intrinsic, pixel_size): + """ + Convert meshroom depth maps to conventional depth maps. + """ + if extrinsic is None: + raise ValueError("Must pass an sfm for this transform") + #deproject using meshoom deprojection + scene_points = camera_deprojection_meshroom(pixels, depth_map, extrinsic, intrinsic, pixel_size) + #reprojection used to get depth map + _, points_distances_from_camera = camera_projection(scene_points, extrinsic, intrinsic, pixel_size) + depth_map_converted = np.reshape(points_distances_from_camera, depth_map.shape) + return depth_map_converted + + def normal2meshroom(pixels, depth_map, extrinsic, intrinsic, pixel_size): + """ + Convert conventional depth maps to meshroom depth maps. + """ + if extrinsic is None: + raise ValueError("Must pass an sfm for this transform") + #deproject using regular equation + scene_points = camera_deprojection(pixels, depth_map, extrinsic, intrinsic, pixel_size) + #Z is distance from camera center + points_distances_from_camera = np.sqrt(np.sum((scene_points-extrinsic[0:3,3])**2, axis=-1)) + depth_map_converted = np.reshape(points_distances_from_camera, depth_map.shape) + depth_map_converted[depth_map<0]=0 + return depth_map_converted + + def id(pixels, depth_map, extrinsic, intrinsic, pixel_size): + return depth_map + + def do_transform(depth_maps_path, sfm_data, transform, output_folder): + """ + Runs the transform on a set of depth maps. + """ + output_depth_map_paths = [] + if sfm_data is not None: + extrinsics, intrinsics, _, _, _, pixel_sizes = matrices_from_sfm_data(sfm_data) + pixels = None + for index, view in enumerate(sfm_data["views"]): + logging.info("Converting view %d/%d"%(index, len(pixel_sizes))) + view_id = view["viewId"] + if not os.path.exists(depth_maps_path[index]): + logging.warning(depth_maps_path[index]+" cannot be found, skipping") + continue + depth_map, depth_map_header = open_exr(depth_maps_path[index]) + depth_map=depth_map.astype(np.float32) + + depth_map_size = np.asarray(depth_map.shape[0:2]) + #add downscale if not present + if "AliceVision:downscale" not in depth_map_header: + depth_map_header["AliceVision:downscale"]=float(view["width"])/float(depth_map_size[1]) + + #FIXME: resizing is not ideal, but convenient to use our calib directly + scale = float(depth_map_header["AliceVision:downscale"]) + depth_map = cv2.resize(depth_map, (scale*depth_map_size[::-1]).astype(np.int32)) + logging.info("Rescaling depth map with %f"%scale) + + ys, xs = np.meshgrid(range(0, depth_map.shape[0]), \ + range(0, depth_map.shape[1]), \ + indexing="ij") + pixels = [xs, ys] + depth_map_transformed = transform(pixels, depth_map, extrinsics[index], intrinsics[index], pixel_sizes[index]) + output_depth_map_path = os.path.join(output_folder, view_id+"_depthMap.exr") + depth_map_transformed[depth_map<0] = 0#put 0 in places where its invalid + + #resie to orginnal size + depth_map_transformed = cv2.resize(depth_map_transformed, depth_map_size[::-1]) + + # add header for vizualisation + if "AliceVision:CArr" not in depth_map_header: + # edit intrinsics pp with scale + intrinsics_dm = intrinsics[index] + # ? 
+ # intrinsics_dm[0,2]/=depth_map_header["AliceVision:downscale"] + # intrinsics_dm[1,2]/=depth_map_header["AliceVision:downscale"] + camera_center = extrinsics[index][0:3, 3].tolist() + inverse_intr_rot = np.linalg.inv(intrinsics_dm @ np.linalg.inv(extrinsics[index][0:3, 0:3])) + + depth_map_header["AliceVision:CArr"] = camera_center + depth_map_header["AliceVision:iCamArr"]= inverse_intr_rot + + save_exr(depth_map_transformed, output_depth_map_path, custom_header=depth_map_header) + output_depth_map_paths.append(output_depth_map_path) + else: + for depth_map_file in depth_maps_path: + depth_map = open_depth_map(depth_map_file) + depth_map_transformed = transform(None, depth_map, None, None, None) + output_depth_map_path = os.path.join(output_folder, os.path.basename(depth_map_file)+"_depthMap.exr") + save_exr(depth_map_transformed, output_depth_map_path) + output_depth_map_paths.append(output_depth_map_path) + return output_depth_map_paths + chunk.logManager.start(chunk.node.verboseLevel.value) depth_folder = chunk.node.depthMapsFolder.value transform_function = eval(chunk.node.transform.value) diff --git a/mrrs/utils/ExecuteCmdConda.py b/mrrs/utils/ExecuteCmdConda.py deleted file mode 100644 index 129b33e..0000000 --- a/mrrs/utils/ExecuteCmdConda.py +++ /dev/null @@ -1,47 +0,0 @@ -__version__ = "1.0" - -from meshroom.core import desc -from meshroom.core.plugin import CondaNode - -class ExecuteCmdConda(CondaNode): - commandLine = '{commandLineValue}' - # gpu = desc.Level.HIGH - - category = 'Meshroom Research' - documentation = ''' ''' - - inputs = [ - desc.StringParam( - name='commandLine', - label='commandLine', - description=''' ''', - value='echo "Hello"', - ), - - desc.StringParam( - name='condaEnv', - label='condaEnv', - description='''''', - value='', - group='' - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='''Verbosity level (fatal, error, warning, info, debug, trace).''', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - ), - ] - - outputs = [ ] - - def processChunk(self, chunk): - self.env_path = chunk.node.condaEnv.value - if chunk.node.condaEnv.value == '': #if no env, just call the normal cl - desc.CommandLineNode.processChunk(chunk) - else: - super().processChunk(chunk) - - diff --git a/mrrs/utils/InjectSfmData.py b/mrrs/utils/InjectSfmData.py index 7881230..aa31565 100644 --- a/mrrs/utils/InjectSfmData.py +++ b/mrrs/utils/InjectSfmData.py @@ -3,13 +3,14 @@ """ __version__ = "3.0" -import os + import json +import os + from meshroom.core import desc -from mrrs.core.geometry import * -from mrrs.core.ios import * +from meshroom.core.plugin import PluginNode -class InjectSfmData(desc.Node): +class InjectSfmData(PluginNode): category = 'Meshroom Research'#Machine Learning Effort for Meshroom #'Sparse Reconstruction' @@ -17,6 +18,8 @@ class InjectSfmData(desc.Node): size = desc.DynamicNodeSize('sourceSfmData') + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") + inputs = [ desc.File( @@ -61,59 +64,36 @@ class InjectSfmData(desc.Node): ), ] - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. 
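
The structure filtering kept in pythonProcessChunk below can be summarised on a toy example (hypothetical data, same logic as the loop in the new code): observations that reference a view missing from the target sfm are dropped before the field is copied over.

    source = {"structure": [{"observations": [{"observationId": "1001"},
                                              {"observationId": "1002"}]}]}
    target = {"views": [{"viewId": "1001"}]}

    view_id = [view["viewId"] for view in target["views"]]
    for landmark in source["structure"]:
        landmark["observations"] = [o for o in landmark["observations"]
                                    if o["observationId"] in view_id]
    # -> only the observation pointing at view "1001" survives
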
- """ - if not chunk.node.sourceSfmData.value: - chunk.logger.warning('No input sourceSfmData in node InjectSfmData, skipping') - return False - if not chunk.node.targetSfmData.value: - chunk.logger.warning('No input targetSfmData in node InjectSfmData, skipping') - return False - return True - - def processChunk(self, chunk): + def pythonProcessChunk(self, args): """ Opens the dataset data. """ - try: - chunk.logManager.start(chunk.node.verboseLevel.value) - #check inputs - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts to inject sfm data") - with open(chunk.node.sourceSfmData.value,"r") as json_file: - source_sfm_data= json.load(json_file) - with open(chunk.node.targetSfmData.value,"r") as json_file: - target_sfm_data= json.load(json_file) - # output_sfm = target_sfm_data.copy() - - for field in chunk.node.exportedFields.value: - chunk.logger.info("Injecting "+field) - if field not in source_sfm_data.keys(): - chunk.logger.info("Field "+field+" not found in "+chunk.node.sourceSfmData.value+", skipping") - continue - if field =="structure":#filter out - chunk.logger.info('Removing structure with no matching views') - #make sure the viewid in obeservation is in the lisy of views, otherwise remove - view_id = [view["viewId"] for view in target_sfm_data['views'] ] - for landmark in source_sfm_data[field]: - valid_observations =[] - for observation in landmark["observations"]: - if observation["observationId"] in view_id: - valid_observations.append(observation) - # else: - # chunk.logger.info('Removing obervation') - landmark["observations"]=valid_observations - - target_sfm_data[field]=source_sfm_data[field] - - with open(chunk.node.outputSfMData.value,"w") as json_file: - json_file.write(json.dumps(target_sfm_data, indent=2)) - chunk.logger.info('') - finally: - chunk.logManager.end() - - - + print("Starts to inject sfm data") + with open(args.sourceSfmData.value,"r") as json_file: + source_sfm_data= json.load(json_file) + with open(args.targetSfmData.value,"r") as json_file: + target_sfm_data= json.load(json_file) + + for field in args.exportedFields.value: + print("Injecting "+field) + if field not in source_sfm_data.keys(): + print("Field "+field+" not found in "+args.sourceSfmData.value+", skipping") + continue + if field =="structure":#filter out + print('Removing structure with no matching views') + #make sure the viewid in obeservation is in the lisy of views, otherwise remove + view_id = [view["viewId"] for view in target_sfm_data['views'] ] + for landmark in source_sfm_data[field]: + valid_observations =[] + for observation in landmark["observations"]: + if observation["observationId"] in view_id: + valid_observations.append(observation) + # else: + # print('Removing obervation') + landmark["observations"]=valid_observations + + target_sfm_data[field]=source_sfm_data[field] + + with open(args.outputSfMData.value,"w") as json_file: + json_file.write(json.dumps(target_sfm_data, indent=2)) + diff --git a/mrrs/utils/MeshTransform.py b/mrrs/utils/MeshTransform.py index f06c1dc..00a769a 100644 --- a/mrrs/utils/MeshTransform.py +++ b/mrrs/utils/MeshTransform.py @@ -6,15 +6,16 @@ import os import json from meshroom.core import desc -from mrrs.core.geometry import * -import trimesh +from meshroom.core.plugin import PluginNode -class MeshTransform(desc.Node):#FIXME: abstract this Dataset, scan folder etc...? +class MeshTransform(PluginNode):#FIXME: abstract this Dataset, scan folder etc...? 
category = 'Meshroom Research'#Machine Learning Effort for Meshroom #'Sparse Reconstruction' documentation = '''.''' + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") + inputs = [ desc.File( @@ -71,56 +72,48 @@ class MeshTransform(desc.Node):#FIXME: abstract this Dataset, scan folder etc... ), ] - def check_inputs(self, chunk): - """ - Checks that all inputs are properly set. - """ - if not chunk.node.inputMesh.value: - chunk.logger.warning('No input inputMesh in node MeshTransform, skipping') - return False - return True - - def processChunk(self, chunk): + def pythonProcessChunk(self, args): """ Applies transform to a mesh. """ - chunk.logManager.start(chunk.node.verboseLevel.value) - mesh_file = chunk.node.inputMesh.value + from mrrs.core.geometry import mesh_transform, transform_cg_cv + import trimesh + import numpy as np + + mesh_file = args.inputMesh + #FIXME: dep to blender if mesh_file.endswith(".abc"): #make sure blender is in path #FIXME: todo #export with blender - script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../blender/alembic_convert.py")) + script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../blender/alembic_convert.py")) command_line = "blender -b -P "+script_path+" -- "+mesh_file+" "+\ - chunk.node.outputMesh.value[:-4]+".obj" + args.outputMesh[:-4]+".obj" print(command_line) # os.popen(command_line).read() os.system(command_line) - mesh_file = chunk.node.outputMesh.value[:-4]+".obj" + mesh_file = args.outputMesh[:-4]+".obj" mesh = trimesh.load(mesh_file) - if chunk.node.inputTransform.value != '': - #check inputs - if not self.check_inputs(chunk): - return - chunk.logger.info("Starts mesh transfrom") + if args.inputTransform != '': + print("Starts mesh transfrom") # Load transform - with open(chunk.node.inputTransform.value, "r") as json_file: + with open(args.inputTransform, "r") as json_file: T_dict = json.load(json_file) T = np.asarray(T_dict['transform'], np.float32) # Load, apply transform and save mesh mesh = mesh_transform(mesh,T) - if chunk.node.flipCG_CV.value: + if args.flipCG_CV: mesh.vertices = transform_cg_cv(mesh.vertices) #apply noise if any - if chunk.node.addGaussianNoise.value > 0: - mesh.vertices += chunk.node.addGaussianNoise.value*np.random.random(size=mesh.vertices.shape) + if args.addGaussianNoise > 0: + mesh.vertices += args.addGaussianNoise*np.random.random(size=mesh.vertices.shape) #save - mesh.export(chunk.node.outputMesh.value) \ No newline at end of file + mesh.export(args.outputMesh) diff --git a/mrrs/utils/Seq2Video.py b/mrrs/utils/Seq2Video.py deleted file mode 100644 index 1a2a171..0000000 --- a/mrrs/utils/Seq2Video.py +++ /dev/null @@ -1,49 +0,0 @@ -__version__ = "1.0" - -import os -from meshroom.core import desc - -class Seq2Video(desc.CommandLineNode): - #FIXme : rez env - commandLine = 'rez env ffmpeg -- ffmpeg -framerate {framerateValue} -y -pattern_type glob -i {imagesFolderValue}/{patternValue} {outputVideoValue}{videoFormatValue}' - gpu = desc.Level.NONE - - category = 'Meshroom Research' - documentation = ''' ''' - - inputs = [ - desc.File( - name='imagesFolder', - label='imagesFolder', - description=''' ''', - value='', - ), - desc.StringParam( - name='pattern', - label='pattern', - description=''' ''', - value='*.png', - ), - desc.FloatParam( - name='framerate', - label='Framerate', - description=''' ''', - value=25.0, - range=(1.0, 3000.0, 1.0), - ), - desc.StringParam( - name='videoFormat', - label='videoFormat', - description=''' ''', - value='.mp4', - ), - 
] - - outputs = [ - desc.File( - name='outputVideo', - label='Output Video', - description=''' ''', - value=os.path.join(desc.Node.internalFolder, 'video'), - ), - ] diff --git a/mrrs/utils/utils_env.yaml b/mrrs/utils/utils_env.yaml new file mode 100644 index 0000000..41631e6 --- /dev/null +++ b/mrrs/utils/utils_env.yaml @@ -0,0 +1,15 @@ +name: 3drBench +channels: + - conda-forge + - open3d-admin + - defaults +dependencies: + - python #=3.9 + - numpy + - openimageio + - py-openimageio + - pip + - psutil + - pip: + #- git+https://github.com/alicevision/MeshroomResearch.git + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # From ee52dcd2b617a43c57545eddb22f6e411fe9dba9 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 31 Jul 2024 14:03:10 +0200 Subject: [PATCH 25/32] continued on refacto --- mrrs/colmap/Meshroom2ColmapSfmConvertions.py | 2 +- mrrs/colmap/env.yaml | 1 + mrrs/core/ios.py | 7 +- mrrs/deep_depth_map/VizMVSNet.py | 25 ++-- mrrs/deep_depth_map/env.yaml | 39 +++-- .../DeepFeatureExtraction.py | 9 +- .../deep_feature_matching/LightGlueMatcher.py | 9 +- mrrs/deep_feature_matching/LoftrMatcher.py | 10 +- mrrs/deep_feature_matching/MaskFeatures.py | 16 +- mrrs/deep_feature_matching/VizFeatures.py | 46 +++--- mrrs/deep_feature_matching/VizTracks.py | 20 +-- mrrs/deep_feature_matching/__init__.py | 0 mrrs/deep_feature_matching/env.yaml | 10 +- mrrs/gaussian_splatting/Dockerfile | 9 +- mrrs/gaussian_splatting/GaussianSplatting.py | 141 +++++++++--------- .../GaussianSplattingRender.py | 10 +- mrrs/nerf/env.yaml | 15 +- mrrs/utils/CalibTransform.py | 7 +- mrrs/utils/utils_env.yaml | 1 + 19 files changed, 212 insertions(+), 165 deletions(-) create mode 100644 mrrs/deep_feature_matching/__init__.py diff --git a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py index 7f21e3c..b11a507 100644 --- a/mrrs/colmap/Meshroom2ColmapSfmConvertions.py +++ b/mrrs/colmap/Meshroom2ColmapSfmConvertions.py @@ -91,7 +91,7 @@ def processChunk(self, chunk): new_images_path = [os.path.join( images_output_folder, basename) for basename in images_basename] #get if we must resize - do_resize = (chunk.node.maxImageSize.value == 0) and (image_sizes[0][0]>chunk.node.maxImageSize.value) + do_resize = (chunk.node.maxImageSize.value != 0) and (image_sizes[0][0]>chunk.node.maxImageSize.value) #modify .sfm with new sizes and filepath if do_resize: diff --git a/mrrs/colmap/env.yaml b/mrrs/colmap/env.yaml index 0110d38..5ef0edf 100644 --- a/mrrs/colmap/env.yaml +++ b/mrrs/colmap/env.yaml @@ -7,6 +7,7 @@ dependencies: - numpy - openimageio - py-openimageio + - opencv - pip - trimesh - pip: diff --git a/mrrs/core/ios.py b/mrrs/core/ios.py index a6301f2..b832b3e 100644 --- a/mrrs/core/ios.py +++ b/mrrs/core/ios.py @@ -5,7 +5,6 @@ import logging import re from struct import unpack -import OpenImageIO as oiio import numpy as np @@ -16,6 +15,8 @@ def open_exr(exr_path): ''' Uses oiio to import an EXR file. ''' + #lazy import to avoid strong dep + import OpenImageIO as oiio exr_file = oiio.ImageInput.open(exr_path) if exr_file is None : raise RuntimeError("Could not open exr file "+exr_path) @@ -36,6 +37,8 @@ def save_exr(input_array, output_file, """ Saves an exr for meshroom, using different formats. 
""" + #lazy import to avoid strong dep + import OpenImageIO as oiio if len(input_array.shape)<2 or len(input_array.shape)>3: raise RuntimeError('Data type not suported for save_exr') elif len(input_array.shape)==2:#gray level case @@ -154,6 +157,8 @@ def save_image(image_path, np_array, orientation=None, auto_rotate=False): Save an image in a numpy array. Range must be 0-255 and channel 1 or 3. """ + #lazy import to avoid strong dep + import OpenImageIO as oiio if len(np_array.shape)==2: np_array=np.expand_dims(np_array, axis = -1) out = oiio.ImageOutput.create(image_path) diff --git a/mrrs/deep_depth_map/VizMVSNet.py b/mrrs/deep_depth_map/VizMVSNet.py index eea64b4..76b4dd7 100644 --- a/mrrs/deep_depth_map/VizMVSNet.py +++ b/mrrs/deep_depth_map/VizMVSNet.py @@ -1,29 +1,22 @@ -import json import os -import numpy as np -import cv2 from meshroom.core import desc -from meshroom.core.plugin import PluginNode - -from mrrs.core.ios import matrices_from_sfm_data, open_depth_map, save_exr -from mrrs.core.utils import format_float_array +from meshroom.core.plugin import PluginCommandLineNode, EnvType EXEC = "python "+ os.path.join(os.path.dirname(__file__), "Vis-MVSNet/test.py") MODEL_PATH = os.path.join(os.path.dirname(__file__), "Vis-MVSNet/pretrained_model/vis") -class VizMVSNet(PluginNode): +class VizMVSNet(PluginCommandLineNode): - category = 'MRRS - Deep depth' + category = 'MRRS - Depth Maps' documentation = ''' ''' gpu = desc.Level.INTENSIVE commandLine = EXEC+" --data_root {outputFolderValue} --result_dir {outputFolderValue} --load_path "+MODEL_PATH\ +" {sizeParamValue} --write_result --dataset_name 'general' " - - #overides the env path - envfile = os.path.join(os.path.dirname(__file__), 'env.yaml') + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), 'env.yaml') inputs = [ desc.File( @@ -101,11 +94,17 @@ class VizMVSNet(PluginNode): ] def processChunk(self, chunk): + import json + import numpy as np + import cv2 + + from mrrs.core.ios import matrices_from_sfm_data, open_depth_map, save_exr + from mrrs.core.utils import format_float_array + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfMData.value == "": raise RuntimeError('Must input SfM data') - #FIXME: move this to mvsnet sfm_data=json.load(open(chunk.node.inputSfMData.value,"r")) (extrinsics, intrinsics, views_id, poses_ids, intrinsics_ids, pixel_sizes_all_cams, images_size) = matrices_from_sfm_data(sfm_data, True) diff --git a/mrrs/deep_depth_map/env.yaml b/mrrs/deep_depth_map/env.yaml index 96ff843..eaa809f 100644 --- a/mrrs/deep_depth_map/env.yaml +++ b/mrrs/deep_depth_map/env.yaml @@ -1,17 +1,34 @@ name: visnmvsnet channels: - conda-forge - - open3d-admin - pytorch - - nvidia - defaults dependencies: - - python>=3.7.6 - - numpy>=1.18.1 - - opencv>=4.1.2.30 - - pytorch>=1.4.0 - - torchvision>=0.5 - - open3d>=0.9.0.0 # for point cloud I/O - - tqdm>=4.41.1 # only for the progressbar - - nvidia-apex>=0.1 # only for sync batch norm - - matplotlib>=3.1.3 # for visualization in val.py and test.py \ No newline at end of file + # from MVSNet + # - python>=3.7.6 + # - numpy>=1.18.1 + # - opencv>=4.1.2.30 + # - pytorch>=1.4.0 + # - torchvision>=0.5 + # - open3d>=0.9.0.0 # for point cloud I/O + # - tqdm>=4.41.1 # only for the progressbar + # - nvidia-apex>=0.1 # only for sync batch norm + # - matplotlib>=3.1.3 # for visualization in val.py and test.py + + # relaxing constraints and removing unecessary deps for forward + - python>=3.0.0 + - numpy + - pytorch + - torchvision + - opencv + - 
tqdm + - matplotlib + + #for meshroom & mrrs + - openimageio + - py-openimageio + - pip + - pip: + - psutil + - setuptools + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch #FIXME \ No newline at end of file diff --git a/mrrs/deep_feature_matching/DeepFeatureExtraction.py b/mrrs/deep_feature_matching/DeepFeatureExtraction.py index 46fc462..5c37671 100644 --- a/mrrs/deep_feature_matching/DeepFeatureExtraction.py +++ b/mrrs/deep_feature_matching/DeepFeatureExtraction.py @@ -3,21 +3,22 @@ from meshroom.core import desc -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/deep_feature_extraction.py") -class DeepFeatureExtraction(PluginNode): +class DeepFeatureExtraction(PluginCommandLineNode): - category = 'Sparse Reconstruction' + category = 'MRRS - Deep Matching' documentation = ''' ''' gpu = desc.Level.INTENSIVE commandLine = EXEC+" {allParams}" #overides the env path - envFile=os.path.dirname(__file__), 'env.yaml' + envFile=os.path.join(os.path.dirname(__file__), 'env.yaml') + envType=EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/deep_feature_matching/LightGlueMatcher.py b/mrrs/deep_feature_matching/LightGlueMatcher.py index 6008590..157c832 100644 --- a/mrrs/deep_feature_matching/LightGlueMatcher.py +++ b/mrrs/deep_feature_matching/LightGlueMatcher.py @@ -3,19 +3,20 @@ from meshroom.core import desc -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/light_glue_matcher.py") -class LightGlueMatching(PluginNode): +class LightGlueMatching(PluginCommandLineNode): - category = 'Sparse Reconstruction' + category = 'MRRS - Deep Matching' documentation = ''' ''' gpu = desc.Level.INTENSIVE commandLine = EXEC+" {allParams}" - envFile=os.path.dirname(__file__), 'env.yaml' + envFile=os.path.join(os.path.dirname(__file__), 'env.yaml') + envType=EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/deep_feature_matching/LoftrMatcher.py b/mrrs/deep_feature_matching/LoftrMatcher.py index 22d9c7a..1bb7318 100644 --- a/mrrs/deep_feature_matching/LoftrMatcher.py +++ b/mrrs/deep_feature_matching/LoftrMatcher.py @@ -2,20 +2,20 @@ import os from meshroom.core import desc - -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType LOFTR_EXEC = "python "+ os.path.join(os.path.dirname(__file__), "kornia_wrappers/loftr_matcher.py") -class LoftrMatcher(PluginNode): +class LoftrMatcher(PluginCommandLineNode): - category = 'Sparse Reconstruction' + category = 'MRRS - Deep Matching' documentation = ''' ''' gpu = desc.Level.INTENSIVE commandLine = LOFTR_EXEC+" {allParams}" - envFile=os.path.dirname(__file__), 'env.yaml' + envFile=os.path.join(os.path.dirname(__file__), 'env.yaml') + envType=EnvType.CONDA inputs = [ desc.File( diff --git a/mrrs/deep_feature_matching/MaskFeatures.py b/mrrs/deep_feature_matching/MaskFeatures.py index 820f108..8d0e013 100644 --- a/mrrs/deep_feature_matching/MaskFeatures.py +++ b/mrrs/deep_feature_matching/MaskFeatures.py @@ -1,17 +1,13 @@ __version__ = "3.0" import os -import json -import numpy as np from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType -from mrrs.core.ios import * -from .kornia_wrappers.utils import open_descriptor_file, write_descriptor_file +class MaskFeatures(PluginNode): -class MaskFeatures(desc.Node): - - 
category = 'Meshroom Research' + category = 'MRRS - Deep Matching' documentation = '''''' inputs = [ @@ -65,6 +61,12 @@ class MaskFeatures(desc.Node): def processChunk(self, chunk): """ """ + + import json + import numpy as np + from mrrs.core.ios import open_image + from .kornia_wrappers.utils import open_descriptor_file, write_descriptor_file + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': raise RuntimeError("No inputSfM specified") diff --git a/mrrs/deep_feature_matching/VizFeatures.py b/mrrs/deep_feature_matching/VizFeatures.py index d1fef4a..c362625 100644 --- a/mrrs/deep_feature_matching/VizFeatures.py +++ b/mrrs/deep_feature_matching/VizFeatures.py @@ -1,34 +1,18 @@ __version__ = "3.0" import os -import json - -import cv2 -from mrrs.deep_feature_matching.utils import open_matches -import numpy as np +from meshroom.core.plugin import PluginNode, EnvType from meshroom.core import desc -from mrrs.core.ios import * -from mrrs.core.geometry import * - -def draw_keypoints(image, keypoints, downsample=1, p = 2, o = 0): - for kp in keypoints[::downsample]: - image[int(kp[1])-p:int(kp[1])+p, o+int(kp[0])-p:o+int(kp[0])+p, :]=[0,255,0] - return image - -def get_best_matching_view(view_matches): - values = list(view_matches.values()) - lengths = [v.shape[0] for v in values] - keys = list(view_matches.keys()) - index_max = np.argmax(lengths) - return keys[index_max], values[index_max] - -class VizFeatures(desc.Node): +class VizFeatures(PluginNode): - category = 'Meshroom Research' + category = 'MRRS - Deep Matching' documentation = '''''' + envFile=os.path.dirname(__file__), 'env.yaml' + envType=EnvType.CONDA + inputs = [ desc.File( name='inputSfM', @@ -125,6 +109,24 @@ class VizFeatures(desc.Node): def processChunk(self, chunk): """ """ + import numpy as np + import json + import cv2 + from mrrs.core.ios import open_image, save_image + from mrrs.deep_feature_matching.kornia_wrappers.utils import open_matches + + def draw_keypoints(image, keypoints, downsample=1, p = 2, o = 0): + for kp in keypoints[::downsample]: + image[int(kp[1])-p:int(kp[1])+p, o+int(kp[0])-p:o+int(kp[0])+p, :]=[0,255,0] + return image + + def get_best_matching_view(view_matches): + values = list(view_matches.values()) + lengths = [v.shape[0] for v in values] + keys = list(view_matches.keys()) + index_max = np.argmax(lengths) + return keys[index_max], values[index_max] + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': raise RuntimeError("No inputSfM specified") diff --git a/mrrs/deep_feature_matching/VizTracks.py b/mrrs/deep_feature_matching/VizTracks.py index fb6b85b..b712217 100644 --- a/mrrs/deep_feature_matching/VizTracks.py +++ b/mrrs/deep_feature_matching/VizTracks.py @@ -3,20 +3,17 @@ import os import json -import cv2 - -import numpy as np - +from meshroom.core.plugin import PluginNode, EnvType from meshroom.core import desc -from mrrs.core.ios import * -from mrrs.core.geometry import * - -class VizTracks(desc.Node): +class VizTracks(PluginNode): - category = 'Meshroom Research' + category = 'MRRS - Deep Matching' documentation = '''''' + envFile=os.path.dirname(__file__), 'env.yaml' + envType=EnvType.CONDA + inputs = [ desc.File( name='inputSfM', @@ -89,6 +86,11 @@ class VizTracks(desc.Node): def processChunk(self, chunk): """ """ + import numpy as np + import json + import cv2 + from mrrs.core.ios import open_image, save_image + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': raise 
RuntimeError("No inputSfM specified") diff --git a/mrrs/deep_feature_matching/__init__.py b/mrrs/deep_feature_matching/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mrrs/deep_feature_matching/env.yaml b/mrrs/deep_feature_matching/env.yaml index 17d4a58..b603905 100644 --- a/mrrs/deep_feature_matching/env.yaml +++ b/mrrs/deep_feature_matching/env.yaml @@ -11,7 +11,9 @@ dependencies: - pip - pip: - click - - - - + - psutil + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch + #mrrs + - opencv + - openimageio + - py-openimageio diff --git a/mrrs/gaussian_splatting/Dockerfile b/mrrs/gaussian_splatting/Dockerfile index 6dbd77f..b62a4dd 100644 --- a/mrrs/gaussian_splatting/Dockerfile +++ b/mrrs/gaussian_splatting/Dockerfile @@ -11,7 +11,7 @@ RUN apt install -y git RUN git clone https://github.com/graphdeco-inria/gaussian-splatting --recursive #installing GS deps -RUN pip install plyfile tqdm ninja +RUN pip install --no-cache-dir plyfile tqdm ninja #needed to build the rasterisation #https://www.data-mining.co.nz/docker-for-data-scientists/troubleshooting/ @@ -19,6 +19,9 @@ ARG TORCH_CUDA_ARCH_LIST="Pascal;Volta;Turing;Ampere" ENV TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}" # Build rasteriser and knn -RUN pip install gaussian-splatting/submodules/diff-gaussian-rasterization -RUN pip install gaussian-splatting/submodules/simple-knn +RUN pip install --no-cache-dir gaussian-splatting/submodules/diff-gaussian-rasterization +RUN pip install --no-cache-dir gaussian-splatting/submodules/simple-knn +#install MRRS +RUN git clone --branch refacto_for_plugin https://github.com/alicevision/MeshroomResearch.git +RUN pip install --no-cache-dir -e MeshroomResearch diff --git a/mrrs/gaussian_splatting/GaussianSplatting.py b/mrrs/gaussian_splatting/GaussianSplatting.py index 5d0be52..db68bd0 100644 --- a/mrrs/gaussian_splatting/GaussianSplatting.py +++ b/mrrs/gaussian_splatting/GaussianSplatting.py @@ -1,79 +1,18 @@ import os -from meshroom.core import desc -from meshroom.core.plugin import DockerNode from distutils.dir_util import copy_tree -from trimesh.exchange.ply import _parse_header, _ply_binary -import numpy as np - -def sigmoid(x): - return 1 / (1 + np.exp(-x)) - -def load_gs_ply(path, max_sh_degree=3): - """ - (modified from original repo) - """ - with open(path, 'rb') as f: - elements, _, _ = _parse_header(f) - _ply_binary(elements, f) - - xyz = np.stack((np.asarray(elements['vertex']['data']["x"]), - np.asarray(elements['vertex']['data']["y"]), - np.asarray(elements['vertex']['data']["z"])), axis=1) - opacities = np.asarray(elements['vertex']['data']["opacity"])[..., np.newaxis] - #aaply activation - opacities = sigmoid(opacities) - - features_dc = np.zeros((xyz.shape[0], 3, 1)) - features_dc[:, 0, 0] = np.asarray(elements['vertex']['data']["f_dc_0"]) - features_dc[:, 1, 0] = np.asarray(elements['vertex']['data']["f_dc_1"]) - features_dc[:, 2, 0] = np.asarray(elements['vertex']['data']["f_dc_2"]) - - extra_f_names = [p for p in elements['vertex']['properties'] if p.startswith("f_rest_")] - extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1])) - assert len(extra_f_names)==3*(max_sh_degree + 1) ** 2 - 3 - features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) - for idx, attr_name in enumerate(extra_f_names): - features_extra[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) - # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) - features_extra = features_extra.reshape((features_extra.shape[0], 3, (max_sh_degree + 1) ** 2 - 1)) - - 
scale_names = [p for p in elements['vertex']['properties']if p.startswith("scale_")] - scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) - scales = np.zeros((xyz.shape[0], len(scale_names))) - for idx, attr_name in enumerate(scale_names): - scales[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) - #scaling activation - scales=np.exp(scales) - - - - rot_names = [p for p in elements['vertex']['properties']if p.startswith("rot")] - rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) - rots = np.zeros((xyz.shape[0], len(rot_names))) - for idx, attr_name in enumerate(rot_names): - rots[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) - - - return xyz, rots, scales, \ - opacities, features_dc, features_extra - - -def rgb_from_sh(sh_0): - """ - Get RGB values for sh coef 0 (solid color) - """ - C0 = 0.28209479177387814 - result = C0 * sh_0 + 0.5 - result = np.clip(result, 0, 1) - return result +from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType -class GaussianSplatting(DockerNode): +class GaussianSplatting(PluginNode): category = 'GaussianSplatting' documentation = '''Node to optimise gaussian splats from a set of input views and poses.''' gpu = desc.Level.INTENSIVE + + envFile = os.path.join(os.path.dirname(__file__), 'Dockerfile') + envType = EnvType.DOCKER commandLine = "python gaussian-splatting/train.py -s /node_folder/input -m /node_folder/output {allParams} \ --save_iterations {iterationsValue}"# --test_iterations {iterationsValue} " @@ -189,6 +128,71 @@ class GaussianSplatting(DockerNode): ] def processChunk(self, chunk): + import trimesh + from trimesh.transformations import compose_matrix + from trimesh.creation import icosphere + from trimesh.exchange.ply import _parse_header, _ply_binary + import numpy as np + + def sigmoid(x): + return 1 / (1 + np.exp(-x)) + + def load_gs_ply(path, max_sh_degree=3): + """ + (modified from original repo) + """ + with open(path, 'rb') as f: + elements, _, _ = _parse_header(f) + _ply_binary(elements, f) + + xyz = np.stack((np.asarray(elements['vertex']['data']["x"]), + np.asarray(elements['vertex']['data']["y"]), + np.asarray(elements['vertex']['data']["z"])), axis=1) + opacities = np.asarray(elements['vertex']['data']["opacity"])[..., np.newaxis] + #aaply activation + opacities = sigmoid(opacities) + + features_dc = np.zeros((xyz.shape[0], 3, 1)) + features_dc[:, 0, 0] = np.asarray(elements['vertex']['data']["f_dc_0"]) + features_dc[:, 1, 0] = np.asarray(elements['vertex']['data']["f_dc_1"]) + features_dc[:, 2, 0] = np.asarray(elements['vertex']['data']["f_dc_2"]) + + extra_f_names = [p for p in elements['vertex']['properties'] if p.startswith("f_rest_")] + extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1])) + assert len(extra_f_names)==3*(max_sh_degree + 1) ** 2 - 3 + features_extra = np.zeros((xyz.shape[0], len(extra_f_names))) + for idx, attr_name in enumerate(extra_f_names): + features_extra[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC) + features_extra = features_extra.reshape((features_extra.shape[0], 3, (max_sh_degree + 1) ** 2 - 1)) + + scale_names = [p for p in elements['vertex']['properties']if p.startswith("scale_")] + scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1])) + scales = np.zeros((xyz.shape[0], len(scale_names))) + for idx, attr_name in enumerate(scale_names): + scales[:, idx] = 
np.asarray(elements['vertex']['data'][attr_name]) + #scaling activation + scales=np.exp(scales) + + rot_names = [p for p in elements['vertex']['properties']if p.startswith("rot")] + rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) + rots = np.zeros((xyz.shape[0], len(rot_names))) + for idx, attr_name in enumerate(rot_names): + rots[:, idx] = np.asarray(elements['vertex']['data'][attr_name]) + + + return xyz, rots, scales, \ + opacities, features_dc, features_extra + + def rgb_from_sh(sh_0): + """ + Get RGB values for sh coef 0 (solid color) + """ + C0 = 0.28209479177387814 + result = C0 * sh_0 + 0.5 + result = np.clip(result, 0, 1) + return result + #copy input data to node's folder (we mount only this folder) input_folder = os.path.join(chunk.node.internalFolder, 'input') output_folder = os.path.join(chunk.node.internalFolder, 'output') @@ -202,9 +206,6 @@ def processChunk(self, chunk): # create 3D display output_mesh = os.path.join(output_folder, "point_cloud", "iteration_%d"%chunk.node.iterations.value, "point_cloud.ply") - import trimesh - from trimesh.transformations import compose_matrix - from trimesh.creation import icosphere gaussians = load_gs_ply(output_mesh) meshes = [] diff --git a/mrrs/gaussian_splatting/GaussianSplattingRender.py b/mrrs/gaussian_splatting/GaussianSplattingRender.py index 5f735de..1750fdb 100644 --- a/mrrs/gaussian_splatting/GaussianSplattingRender.py +++ b/mrrs/gaussian_splatting/GaussianSplattingRender.py @@ -1,11 +1,12 @@ import os -from meshroom.core import desc -from meshroom.core.plugin import DockerNode from distutils.dir_util import copy_tree from shutil import move -class GaussianSplattingRender(DockerNode): +from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType + +class GaussianSplattingRender(PluginNode): category = 'GaussianSplatting' documentation = '''Node to render frames from a .sfm and the optimised gaussian splats.''' @@ -13,7 +14,8 @@ class GaussianSplattingRender(DockerNode): commandLine = "python gaussian-splatting/render.py -s /node_folder/input_scene -m /node_folder/input_model" - envFile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Dockerfile')) + envFile = os.path.join(os.path.dirname(__file__), 'Dockerfile') + envType = EnvType.DOCKER inputs = [ desc.File( diff --git a/mrrs/nerf/env.yaml b/mrrs/nerf/env.yaml index dabd135..2776875 100644 --- a/mrrs/nerf/env.yaml +++ b/mrrs/nerf/env.yaml @@ -1,4 +1,4 @@ -#export LIBRARY_PATH=/usr/lib64 is needed to point to libcuda (lib from the driver) + name: nerfstudio channels: - nvidia/label/cuda-11.8.0 #for cuda toolkit 11.8.0 @@ -22,7 +22,14 @@ dependencies: # - torchvision==0.17.0+cu118 #this doenst work because of ordering - git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch #need to have pytorch installed to install # - nerfstudio - - +#is needed to point to libcuda (lib from the driver) variables: - LIBRARY_PATH: /usr/lib64 \ No newline at end of file + LIBRARY_PATH: /usr/lib64 + + #mrrs/meshroom + - pip: + - psutil + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch + - opencv + - openimageio + - py-openimageio diff --git a/mrrs/utils/CalibTransform.py b/mrrs/utils/CalibTransform.py index 2a0bf20..9432eaa 100644 --- a/mrrs/utils/CalibTransform.py +++ b/mrrs/utils/CalibTransform.py @@ -5,9 +5,6 @@ from meshroom.core import desc -from mrrs.core.ios import * -from mrrs.core.geometry import * - from mrrs.core.geometry import CG_CV_MAT33 class Transforms(): @@ -128,6 +125,10 @@ def processChunk(self, 
chunk): """ Computes the different transforms """ + + import numpy as np + from mrrs.core.geometry import sfm_data_from_matrices, matrices_from_sfm_data + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': raise RuntimeError("No inputSfM specified") diff --git a/mrrs/utils/utils_env.yaml b/mrrs/utils/utils_env.yaml index 41631e6..bdc3b6c 100644 --- a/mrrs/utils/utils_env.yaml +++ b/mrrs/utils/utils_env.yaml @@ -10,6 +10,7 @@ dependencies: - py-openimageio - pip - psutil + - trimesh - pip: #- git+https://github.com/alicevision/MeshroomResearch.git - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # From 17476b45ce61b36aba9d409a7cabe056ce59105e Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 31 Jul 2024 14:53:31 +0200 Subject: [PATCH 26/32] fixed deep_feature --- mrrs/deep_feature_matching/env.yaml | 32 ++++++++++++------- .../deep_feature_extraction.py | 5 ++- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/mrrs/deep_feature_matching/env.yaml b/mrrs/deep_feature_matching/env.yaml index b603905..a6245b3 100644 --- a/mrrs/deep_feature_matching/env.yaml +++ b/mrrs/deep_feature_matching/env.yaml @@ -2,18 +2,28 @@ name: deepFeatures channels: - nvidia - pytorch + - conda-forge dependencies: - - python==3.8 - - pytorch==1.13.1 - - pytorch-cuda==11.6 - - torchvision==0.14.1 - - conda-forge::kornia==0.7.0 - - pip - - pip: - - click - - psutil - - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch - #mrrs + # first tests @mikros + # - python==3.8 + # - pytorch==1.13.1 + # - pytorch-cuda==11.6 + # - torchvision==0.14.1 + # - conda-forge::kornia==0.7.0 + + # relaxing constraints and removing unecessary deps for forward + - python>=3.0.0 + - numpy + - pytorch + - torchvision - opencv + - kornia + + #for meshroom & mrrs - openimageio - py-openimageio + - pip + - pip: + - psutil + - setuptools + - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch #FIXME diff --git a/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py b/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py index 84194b1..3e6349a 100644 --- a/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py +++ b/mrrs/deep_feature_matching/kornia_wrappers/deep_feature_extraction.py @@ -2,12 +2,15 @@ import os import click import numpy as np +import sys import kornia import torch from torch.nn.functional import pad -from .utils import time_it, open_and_prepare_image, write_descriptor_file +import sys +sys.path.insert(0,os.path.dirname(__file__)) +from utils import time_it, open_and_prepare_image, write_descriptor_file FEATURE_SIZE = 128 From e688db9d1c75786a41dac1c7dc87fa82ccadebc1 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 31 Jul 2024 15:11:17 +0200 Subject: [PATCH 27/32] util fixes --- mrrs/utils/CalibTransform.py | 137 ++++++++++++++++++----------------- mrrs/utils/ConvertImages.py | 23 +++--- mrrs/utils/CopyData.py | 4 +- mrrs/utils/CutSfm.py | 24 ++---- mrrs/utils/InjectSfmData.py | 7 +- mrrs/utils/MeshTransform.py | 7 +- 6 files changed, 100 insertions(+), 102 deletions(-) diff --git a/mrrs/utils/CalibTransform.py b/mrrs/utils/CalibTransform.py index 9432eaa..f71a249 100644 --- a/mrrs/utils/CalibTransform.py +++ b/mrrs/utils/CalibTransform.py @@ -4,77 +4,19 @@ import json from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType -from mrrs.core.geometry import CG_CV_MAT33 +transforms_names = ["id", "cg2cv", "custom", "inv", + "scale", "center", "set_focal"]#[f for f in 
Transforms.__dict__.keys() if not f.startswith("__")] -class Transforms(): - """ - class used as namespace to automatically include in gui - """ - def id(extrinsics, intrinsics, param): - """ - Identity transform, does nothing - """ - return extrinsics, intrinsics - - def cg2cv(extrinsics, intrinsics, param): - """ - Swap from CG to CV coordinate system - """ - extrinsics=CG_CV_MAT33@extrinsics - return extrinsics, intrinsics - - def custom(extrinsics, intrinsics, param): - """ - Will use the passed param_array to transform the poses - """ - extrinsics=param@extrinsics - return extrinsics, intrinsics +class CalibTransform(PluginNode): - def inv(extrinsics, intrinsics, param): - """ - Inv of the poses - """ - extrinsics = [np.linalg.inv(np.concatenate( [e,[[0,0,0,1]]] )) for e in extrinsics] - return extrinsics, intrinsics - - def scale(extrinsics, intrinsics, param): - """ - """ - extrinsics=np.asarray(extrinsics) - center = np.mean(extrinsics[:,0:3,3], axis=0 ) - for i in range(len(extrinsics)): - extrinsics[i][0:3,3] -= center - extrinsics[i][0:3,3] *= param - extrinsics[i][0:3,3] += center - return extrinsics, intrinsics - - def center(extrinsics, intrinsics, param): - """ - Will normalise the calib such that the camera centers are between -1 and 1 - """ - extrinsics=np.asarray(extrinsics) - center = np.mean(extrinsics[:,0:3,3], axis=0 ) - for i in range(len(extrinsics)): - extrinsics[i][0:3,3] -= center - return extrinsics, intrinsics - - def set_focal(extrinsics, intrinsics, param): - """ - Set focal - """ - intrinsics=np.asarray(intrinsics) - intrinsics[:,0,0] = param - intrinsics[:,1,1] = param - return extrinsics, intrinsics - -transforms_names = [f for f in Transforms.__dict__.keys() if not f.startswith("__")] - -class CalibTransform(desc.Node): - - category = 'Meshroom Research' + category = 'MRRS - Utils' documentation = '''''' + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") + inputs = [ desc.File( name='inputSfM', @@ -128,6 +70,69 @@ def processChunk(self, chunk): import numpy as np from mrrs.core.geometry import sfm_data_from_matrices, matrices_from_sfm_data + from mrrs.core.geometry import CG_CV_MAT33 + + class Transforms(): + """ + class used as namespace to automatically include in gui + """ + + def id(extrinsics, intrinsics, param): + """ + Identity transform, does nothing + """ + return extrinsics, intrinsics + + def cg2cv(extrinsics, intrinsics, param): + """ + Swap from CG to CV coordinate system + """ + extrinsics=CG_CV_MAT33@extrinsics + return extrinsics, intrinsics + + def custom(extrinsics, intrinsics, param): + """ + Will use the passed param_array to transform the poses + """ + extrinsics=param@extrinsics + return extrinsics, intrinsics + + def inv(extrinsics, intrinsics, param): + """ + Inv of the poses + """ + extrinsics = [np.linalg.inv(np.concatenate( [e,[[0,0,0,1]]] )) for e in extrinsics] + return extrinsics, intrinsics + + def scale(extrinsics, intrinsics, param): + """ + """ + extrinsics=np.asarray(extrinsics) + center = np.mean(extrinsics[:,0:3,3], axis=0 ) + for i in range(len(extrinsics)): + extrinsics[i][0:3,3] -= center + extrinsics[i][0:3,3] *= param + extrinsics[i][0:3,3] += center + return extrinsics, intrinsics + + def center(extrinsics, intrinsics, param): + """ + Will normalise the calib such that the camera centers are between -1 and 1 + """ + extrinsics=np.asarray(extrinsics) + center = np.mean(extrinsics[:,0:3,3], axis=0 ) + for i in range(len(extrinsics)): + extrinsics[i][0:3,3] -= center + 
return extrinsics, intrinsics + + def set_focal(extrinsics, intrinsics, param): + """ + Set focal + """ + intrinsics=np.asarray(intrinsics) + intrinsics[:,0,0] = param + intrinsics[:,1,1] = param + return extrinsics, intrinsics chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': diff --git a/mrrs/utils/ConvertImages.py b/mrrs/utils/ConvertImages.py index 38d5a8e..021b0e5 100644 --- a/mrrs/utils/ConvertImages.py +++ b/mrrs/utils/ConvertImages.py @@ -6,23 +6,19 @@ import os import json -import cv2 -from mrrs.core.utils import cv2_resize_with_pad -import numpy as np - -from mrrs.core.ios import open_image, save_image from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType - -class ConvertImages(desc.Node): +class ConvertImages(PluginNode): """ Generic node to perform segmentation. """ - size = desc.DynamicNodeSize('input') - category = 'Meshroom Research' - documentation = '''Node to convert images into a specific file format''' - size = desc.DynamicNodeSize('input') + category = 'MRRS - Utils' + documentation = '''''' + + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") inputs = [ desc.File( @@ -146,6 +142,11 @@ def check_inputs(self, chunk): return True def processChunk(self, chunk): + import cv2 + from mrrs.core.utils import cv2_resize_with_pad + import numpy as np + from mrrs.core.ios import open_image, save_image + try: chunk.logManager.start(chunk.node.verboseLevel.value) if not self.check_inputs(chunk): diff --git a/mrrs/utils/CopyData.py b/mrrs/utils/CopyData.py index 14a67ba..b3a42d7 100644 --- a/mrrs/utils/CopyData.py +++ b/mrrs/utils/CopyData.py @@ -11,9 +11,9 @@ from distutils.dir_util import copy_tree class CopyData(desc.Node): - category = 'Meshroom Research' - documentation = ''' ''' + category = 'MRRS - Utils' + documentation = '''''' inputs = [ diff --git a/mrrs/utils/CutSfm.py b/mrrs/utils/CutSfm.py index 1c813f7..5b371a2 100644 --- a/mrrs/utils/CutSfm.py +++ b/mrrs/utils/CutSfm.py @@ -4,17 +4,16 @@ import json from meshroom.core import desc +from meshroom.core.plugin import PluginNode, EnvType -from mrrs.core.ios import * -from mrrs.core.geometry import * +class CutSfm(PluginNode): -from mrrs.core.geometry import CG_CV_MAT33 - -class CutSfm(desc.Node): - - category = 'Meshroom Research' + category = 'MRRS - Utils' documentation = '''''' + envType = EnvType.CONDA + envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") + inputs = [ desc.File( name='inputSfM', @@ -57,19 +56,12 @@ class CutSfm(desc.Node): ) ] - def extract_frames(extrinsics, intrinsics, param): - """ - Extract a set of frames from the .sfm - """ - param=param.astype(np.int32) - extrinsics = np.asarray(extrinsics)[param] - return extrinsics, intrinsics - - def processChunk(self, chunk): """ Computes the different transforms """ + import numpy as np + chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': raise RuntimeError("No inputSfM specified") diff --git a/mrrs/utils/InjectSfmData.py b/mrrs/utils/InjectSfmData.py index aa31565..60de0fc 100644 --- a/mrrs/utils/InjectSfmData.py +++ b/mrrs/utils/InjectSfmData.py @@ -8,16 +8,15 @@ import os from meshroom.core import desc -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginNode, EnvType class InjectSfmData(PluginNode): - category = 'Meshroom Research'#Machine Learning Effort for Meshroom #'Sparse Reconstruction' + category = 'MRRS - Utils' documentation = '''This node 
injects some fields from a source sfm data to a target sfm data.''' - size = desc.DynamicNodeSize('sourceSfmData') - + envType = EnvType.CONDA envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") inputs = [ diff --git a/mrrs/utils/MeshTransform.py b/mrrs/utils/MeshTransform.py index 00a769a..cda9949 100644 --- a/mrrs/utils/MeshTransform.py +++ b/mrrs/utils/MeshTransform.py @@ -6,14 +6,15 @@ import os import json from meshroom.core import desc -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginNode, EnvType class MeshTransform(PluginNode):#FIXME: abstract this Dataset, scan folder etc...? - category = 'Meshroom Research'#Machine Learning Effort for Meshroom #'Sparse Reconstruction' + category = 'MRRS - Utils'#Machine Learning Effort for Meshroom #'Sparse Reconstruction' - documentation = '''.''' + documentation = '''''' + envType = EnvType.CONDA envFile = os.path.join(os.path.dirname(__file__), "utils_env.yaml") inputs = [ From 53f1df73e3f445e43c92f1083320eef31f80efec Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Wed, 31 Jul 2024 16:32:43 +0200 Subject: [PATCH 28/32] fixes --- .../kornia_wrappers/light_glue_matcher.py | 4 +++- mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py | 4 +++- mrrs/utils/utils_env.yaml | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py index deef556..f6ca813 100644 --- a/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py +++ b/mrrs/deep_feature_matching/kornia_wrappers/light_glue_matcher.py @@ -6,7 +6,9 @@ import kornia import torch -from .utils import time_it, open_image_grapĥ, open_descriptor_file +import sys +sys.path.insert(0,os.path.dirname(__file__)) +from utils import time_it, open_and_prepare_image, write_descriptor_file @click.command() @click.option('--inputSfMData', help='Input sfm data') diff --git a/mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py b/mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py index d88c305..ad01b84 100644 --- a/mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py +++ b/mrrs/deep_feature_matching/kornia_wrappers/loftr_matcher.py @@ -8,7 +8,9 @@ from kornia.feature.loftr.loftr import LoFTR from kornia.feature.loftr.loftr import default_cfg#the default config file -from .utils import time_it, open_and_prepare_image, open_image_grapĥ, write_descriptor_file +import sys +sys.path.insert(0,os.path.dirname(__file__)) +from utils import time_it, open_and_prepare_image, write_descriptor_file def get_all_keypoints(feature_map_size): """ diff --git a/mrrs/utils/utils_env.yaml b/mrrs/utils/utils_env.yaml index bdc3b6c..f6dd8e9 100644 --- a/mrrs/utils/utils_env.yaml +++ b/mrrs/utils/utils_env.yaml @@ -1,7 +1,6 @@ name: 3drBench channels: - conda-forge - - open3d-admin - defaults dependencies: - python #=3.9 @@ -14,3 +13,4 @@ dependencies: - pip: #- git+https://github.com/alicevision/MeshroomResearch.git - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # + - setuptools From 0424b5a6577ef16a7b4d5cdd7ae590db11b10f94 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Thu, 26 Sep 2024 12:21:43 +0200 Subject: [PATCH 29/32] fixes extrinsics and export xmp --- mrrs/core/geometry.py | 2 +- mrrs/core/ios.py | 49 +++++++++++++++++++++-- mrrs/reality_capture/ExportXMP.py | 21 ++++++++-- mrrs/reality_capture/env.yaml | 3 +- mrrs/reality_capture/reality_capture.py | 53 +++++++++++++++++-------- 
mrrs/utils/ConvertImages.py | 1 + 6 files changed, 104 insertions(+), 25 deletions(-) diff --git a/mrrs/core/geometry.py b/mrrs/core/geometry.py index aa5aef6..dec78f4 100644 --- a/mrrs/core/geometry.py +++ b/mrrs/core/geometry.py @@ -45,7 +45,7 @@ def camera_projection(vertices, extrinsic, intrinsic, pixel_size=0): vertices_homo = vertices if vertices.shape[-1] != 4:#if not homo make homo vertices_homo = make_homogeneous(vertices) - # project vertices into the camera + # project vertices into the camera FIXME: extrinsic are supposed to be world to cam, so no inv extrinsic = np.linalg.inv(np.concatenate([extrinsic[0:3, 0:4], [[0, 0, 0, 1]]], axis=0))[0:3, 0:4] # vertices in camera CS vertices_camera_cs = extrinsic @ np.transpose(vertices_homo) diff --git a/mrrs/core/ios.py b/mrrs/core/ios.py index b832b3e..c038135 100644 --- a/mrrs/core/ios.py +++ b/mrrs/core/ios.py @@ -148,7 +148,7 @@ def open_image(image_path, auto_rotate=False, return_orientation=False, to_srgb= if len(image.shape)==2: image = np.expand_dims(image, -1) if return_orientation: - return image, orientation + return image, int(orientation) else: return image @@ -299,12 +299,39 @@ def parse_extrisic_sfm_data(sfm_pose): Extracts the relevant items from a sfm pose dictionary. The pose is camera to world. """ + # pose_id = sfm_pose['poseId'] + # rotation = np.asarray(sfm_pose['pose']['transform']['rotation'], dtype=np.float32) + # rotation = rotation.reshape([3,3], order='F') + # center = np.asarray(sfm_pose['pose']['transform']['center'], dtype=np.float32) + # translation = - rotation @ center + # extrinsic = np.concatenate([rotation, np.expand_dims(translation, axis=-1)], axis=-1) + + # pose_id = sfm_pose['poseId'] + # rotation = np.asarray(sfm_pose['pose']['transform']['rotation'], dtype=np.float32) + # rotation = rotation.reshape([3,3]) + # translation = np.asarray(sfm_pose['pose']['transform']['center'], dtype=np.float32) + # extrinsic = np.concatenate([rotation, np.expand_dims(translation, axis=-1)], axis=-1) + pose_id = sfm_pose['poseId'] rotation = np.asarray(sfm_pose['pose']['transform']['rotation'], dtype=np.float32) - rotation = rotation.reshape([3,3]) - translation = np.asarray(sfm_pose['pose']['transform']['center'], dtype=np.float32) + rotation = rotation.reshape([3,3]).transpose() + center = np.asarray(sfm_pose['pose']['transform']['center'], dtype=np.float32) + translation = -rotation@center extrinsic = np.concatenate([rotation, np.expand_dims(translation, axis=-1)], axis=-1) + return extrinsic, pose_id + +def parse_subpose_sfm_data(sfm_pose): + """ + Parse pose from rig + """ + rotation = np.asarray(sfm_pose['pose']['rotation'], dtype=np.float32) + rotation = rotation.reshape([3,3]).transpose() + center = np.asarray(sfm_pose['pose']['center'], dtype=np.float32) + translation = -rotation@center + + extrinsic = np.concatenate([rotation, np.expand_dims(translation, axis=-1)], axis=-1) + return extrinsic def get_image_sizes(sfm_data): return [ (int(view["width"]), int(view["height"])) for view in sfm_data["views"] ] @@ -336,6 +363,12 @@ def matrices_from_sfm_data(sfm_data, return_image_sizes=False): intrinsics_id.append(intrinsic_id) pixel_sizes.append(pixel_size) + #rig case + is_rig = False + if 'rigs' in sfm_data : + is_rig = True + rigs = sfm_data["rigs"] + #returns view and poses for each view poses_id = np.asarray(poses_id) intrinsics_id=np.asarray(intrinsics_id) @@ -343,6 +376,7 @@ def matrices_from_sfm_data(sfm_data, return_image_sizes=False): extrinsics_all_cams = [] pixel_sizes_all_cams = [] image_sizes = 
[] + for view in sfm_data["views"]: view_id = view["viewId"] views_id.append(view_id) @@ -358,7 +392,14 @@ def matrices_from_sfm_data(sfm_data, return_image_sizes=False): intrinsic_index = np.where(intrinsics_id==intrinsic_id)[0] #fetch the correspoding poses and intrinsics intrinsics_all_cams.append(intrinsics[intrinsic_index[0]].copy()) - extrinsics_all_cams.append(extrinsics[pose_index[0]].copy()) + extrinsic = extrinsics[pose_index[0]].copy() + if is_rig: + rig_id = view["rigId"] + sub_pose_index=view["subPoseId"] + rig = [r for r in rigs if r["rigId"] == rig_id][0] #NOTE: error if more than 1? + sub_pose=parse_subpose_sfm_data(rig["subPoses"][int(sub_pose_index)]) + extrinsic = np.concatenate([sub_pose, [[0,0,0,1]]])@np.concatenate([extrinsic, [[0,0,0,1]]]) + extrinsics_all_cams.append(extrinsic) pixel_sizes_all_cams.append(pixel_sizes[intrinsic_index[0]]) image_sizes.append([int(view["width"]), int(view["height"])]) pixel_sizes_all_cams=np.asarray(pixel_sizes_all_cams) diff --git a/mrrs/reality_capture/ExportXMP.py b/mrrs/reality_capture/ExportXMP.py index 7f7344a..1a6f43f 100644 --- a/mrrs/reality_capture/ExportXMP.py +++ b/mrrs/reality_capture/ExportXMP.py @@ -6,17 +6,18 @@ import os from meshroom.core import desc -from meshroom.core.plugin import PluginNode +from meshroom.core.plugin import PluginCommandLineNode, EnvType -class ExportXMP(PluginNode): +class ExportXMP(PluginCommandLineNode): category = 'MRRS - Reality Capture' documentation = '''Node to create an XMP file from camera calibration.''' - commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "reality_capture.py")+'" exportxmp {sfmDataValue} {outputFolderValue} ' + commandLine = 'python "'+os.path.join(os.path.dirname(__file__), "reality_capture.py")+'" exportxmp {sfmDataValue} {outputFolderValue} {exportImageValue} {useUIDValue}' envFile = os.path.join(os.path.dirname(__file__), "env.yaml") + envType = EnvType.CONDA inputs = [ @@ -36,6 +37,20 @@ class ExportXMP(PluginNode): value="", ), + desc.BoolParam( + name='exportImage', + label='Export Image', + description='''''', + value=False, + ), + + desc.BoolParam( + name='useUID', + label='Use UID', + description='''''', + value=False, + ), + desc.ChoiceParam( name='verboseLevel', label='Verbose Level', diff --git a/mrrs/reality_capture/env.yaml b/mrrs/reality_capture/env.yaml index fd4d02a..fb3499e 100644 --- a/mrrs/reality_capture/env.yaml +++ b/mrrs/reality_capture/env.yaml @@ -11,4 +11,5 @@ dependencies: - pip - pip: #- git+https://github.com/alicevision/MeshroomResearch.git - - /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # + - -e /s/apps/users/multiview/mrrs/hogm/MeshroomResearch # + - psutil diff --git a/mrrs/reality_capture/reality_capture.py b/mrrs/reality_capture/reality_capture.py index f03a86c..46febfe 100644 --- a/mrrs/reality_capture/reality_capture.py +++ b/mrrs/reality_capture/reality_capture.py @@ -1,3 +1,5 @@ +from enum import unique +from pathlib import Path import re import os import numpy as np @@ -7,7 +9,7 @@ from mrrs.core.ios import get_image_sizes, matrices_from_sfm_data, sfm_data_from_matrices #in RC the sensor size is set to 35mm -SENSOR_SIZE = 35 +SENSOR_SIZE = 36 def _parse_xmp(xmp_file): """ @@ -61,17 +63,16 @@ def _export_xmp(xmp_file, extrinsics, intrinsics, pixel_size, image_size): our_sensor_width = pixel_size*image_size[0] our_sensor_height = pixel_size*image_size[1] our_focal = intrinsics[0,0] + #turn focal from unit sensor into equivalent 35mm focal = our_focal*SENSOR_SIZE/our_sensor_width - #convert pp in mm into 
offset from center in mm - principal_point_u = intrinsics[0,2]-our_sensor_height/2 - principal_point_v = intrinsics[1,2]-our_sensor_width/2 - #pass it into relative - principal_point_u /= our_sensor_height - principal_point_v /= our_sensor_width - rotation = np.linalg.inv(extrinsics[0:3,0:3]) - position = extrinsics[0:3, 3] + # convert pp in pixel, + principal_point_u = (intrinsics[0,2] - (our_sensor_width/2.0)) / our_sensor_width + principal_point_v = (intrinsics[1,2] - (our_sensor_height/2.0)) / our_sensor_width + + rotation = extrinsics[0:3,0:3] + position = -extrinsics[0:3, 0:3].transpose()@extrinsics[0:3, 3] def format_array(array): formated_str = "" @@ -94,6 +95,8 @@ def format_array(array): """.format(str(focal), principal_point_u, principal_point_v, format_array(rotation.flatten()), format_array(position), '0 0 0 0 0 0')#FIXME: for now we dont support distortion + if not os.path.exists(os.path.dirname(xmp_file)): + os.makedirs(os.path.dirname(xmp_file)) with open(xmp_file, "w") as f: f.write(xmp_string) @@ -134,7 +137,7 @@ def _import_xmp(sfm_data, xmp_folder): i[0, 0] /= pixel_size i[1, 1] /= pixel_size # convert principal point in pixels - # https://support.capturingreality.com/hc/en-us/community/posts/115002199052-Unit-and-convention-of-PrincipalPointU-and-PrincipalPointV + # https://sup7port.capturingreality.com/hc/en-us/community/posts/115002199052-Unit-and-convention-of-PrincipalPointU-and-PrincipalPointV # dimentionless because already /35 => we pass it into pixels, and offset from top image #lookign for "-0.75009676916349455", "-5.1187297220630112" @@ -178,19 +181,37 @@ def importXMP(sfmdata, xmpdata, outputsfmdata): @rc.command() @click.argument("sfmdata") @click.argument("outputfolder") -def exportXMP(sfmdata, outputfolder): +@click.argument("exportimage",type=bool) +@click.argument("useuid",type=bool) +def exportXMP(sfmdata, outputfolder, exportimage, useuid): sfm_data = json.load(open(sfmdata, "r")) + (extrinsics_all_cams, intrinsics_all_cams, views_id, poses_id, intrinsics_id, pixel_sizes_all_cams) = matrices_from_sfm_data(sfm_data) + image_sizes = get_image_sizes(sfm_data) - images_names = [os.path.basename(view["path"])[:-4] for view in sfm_data["views"]] - for image_name, extrinsics, intrinsics, pixel_size, image_size in zip(images_names, extrinsics_all_cams, - intrinsics_all_cams, pixel_sizes_all_cams, image_sizes): + + for view_idx, view in enumerate(sfm_data["views"]): + image_name = os.path.join( + os.path.basename(os.path.dirname(view["path"])), + os.path.basename(view["path"])[:-4] + ) + extrinsics = extrinsics_all_cams[view_idx] + intrinsics = intrinsics_all_cams[view_idx] + pixel_size = pixel_sizes_all_cams[view_idx] + image_size = image_sizes[view_idx] + if extrinsics is not None: - xmp_file = os.path.join(outputfolder, image_name+".xmp") + if useuid: + xmp_file = os.path.join(outputfolder, views_id[view_idx]+".xmp") + else: + xmp_file = os.path.join(outputfolder, image_name+".xmp") _export_xmp(xmp_file, extrinsics, intrinsics, pixel_size,image_size ) - + if exportimage: + image_file = os.path.join(outputfolder, os.path.basename(view["path"])) + if not os.path.exists(image_file): + os.symlink(view["path"], image_file) if __name__ == '__main__': rc() diff --git a/mrrs/utils/ConvertImages.py b/mrrs/utils/ConvertImages.py index 021b0e5..c4f6cb8 100644 --- a/mrrs/utils/ConvertImages.py +++ b/mrrs/utils/ConvertImages.py @@ -164,6 +164,7 @@ def processChunk(self, chunk): #modify the corresponding intrinsic (! 
done multiple time becasue sevearl view share one intricic) intrinsicId = sfm_data["views"][index]["intrinsicId"] frameId = int(sfm_data["views"][index]["frameId"]) + print(orientation) chunk.logger.info('\tOrientation %d'%orientation) resample_x=chunk.node.resampleX.value if chunk.node.autoPixelRatio.value: From 86f6919eb4d568a5de45b40e2f68bb844a26e029 Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Mon, 30 Sep 2024 17:11:50 +0200 Subject: [PATCH 30/32] fix xmp --- mrrs/reality_capture/reality_capture.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mrrs/reality_capture/reality_capture.py b/mrrs/reality_capture/reality_capture.py index 46febfe..00aeaea 100644 --- a/mrrs/reality_capture/reality_capture.py +++ b/mrrs/reality_capture/reality_capture.py @@ -63,14 +63,15 @@ def _export_xmp(xmp_file, extrinsics, intrinsics, pixel_size, image_size): our_sensor_width = pixel_size*image_size[0] our_sensor_height = pixel_size*image_size[1] our_focal = intrinsics[0,0] - + #turn focal from unit sensor into equivalent 35mm focal = our_focal*SENSOR_SIZE/our_sensor_width - # convert pp in pixel, + # pp relative principal_point_u = (intrinsics[0,2] - (our_sensor_width/2.0)) / our_sensor_width principal_point_v = (intrinsics[1,2] - (our_sensor_height/2.0)) / our_sensor_width + #-R^tT rotation = extrinsics[0:3,0:3] position = -extrinsics[0:3, 0:3].transpose()@extrinsics[0:3, 3] From 04340a5f8266e2d1847d6b666df199880c7db36c Mon Sep 17 00:00:00 2001 From: Matthieu Hog Date: Tue, 28 Jan 2025 11:53:55 +0100 Subject: [PATCH 31/32] added deep image matching things (roma experiments) --- .../CreateTracksFromWarps.py | 342 ++++++++++++ mrrs/deep_image_matching/DeepImageMatching.py | 520 ++++++++++++++++++ .../GeometricFilterMatch.py | 227 ++++++++ mrrs/deep_image_matching/TestSparseWarp.py | 154 ++++++ mrrs/deep_image_matching/__init__.py | 0 5 files changed, 1243 insertions(+) create mode 100644 mrrs/deep_image_matching/CreateTracksFromWarps.py create mode 100644 mrrs/deep_image_matching/DeepImageMatching.py create mode 100644 mrrs/deep_image_matching/GeometricFilterMatch.py create mode 100644 mrrs/deep_image_matching/TestSparseWarp.py create mode 100644 mrrs/deep_image_matching/__init__.py diff --git a/mrrs/deep_image_matching/CreateTracksFromWarps.py b/mrrs/deep_image_matching/CreateTracksFromWarps.py new file mode 100644 index 0000000..fbbb9f6 --- /dev/null +++ b/mrrs/deep_image_matching/CreateTracksFromWarps.py @@ -0,0 +1,342 @@ +__version__ = "3.0" + +import os + +from meshroom.core.plugin import PluginNode, EnvType +from meshroom.core import desc + + +class CreateTracksFromWarps(PluginNode): + + category = 'MRRS - Deep Matching' + documentation = '''''' + + envFile=os.path.join(os.path.dirname(__file__), 'minenv.yaml') + envType=EnvType.CONDA + + inputs = [ + desc.File( + name='inputSfMData', + label='inputSfMData', + description='SfMData file.', + value='' + ), + + desc.File( + name="matchFolder", + label="Match Folder", + description="", + value="" + ), + + desc.ChoiceParam( + name="mode", + label="mode", + description="", + value="clique", + values=["greedy", "clique"], + exclusive=True, + ), + + # desc.FloatParam( + # name='errorThreshold', + # label='errorThreshold', + # description=''' ''', + # value=1.0, + # range=(0.0, 1000000000.0, 0.1), + # advanced=True + # ), + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 
'info', 'debug', 'trace'], + exclusive=True, + + ), + + desc.ChoiceParam( + name='describerTypes', + label='describerTypes', + description="", + value='dspsift', + values=['dspsift'], + exclusive=True) + ] + + outputs = [ + desc.File( + name='output', + label='output', + description='output', + value=desc.Node.internalFolder, + group='', + ), + desc.File( + name="outputTracks", + label="Output tracks", + description="", + value=os.path.join(desc.Node.internalFolder, "tracks.json"), + ) + ] + + def processChunk(self, chunk): + """ + """ + import numpy as np + from mrrs.core.ios import open_exr + import json + + chunk.logManager.start(chunk.node.verboseLevel.value) + + chunk.logger.info("Opening files") + with open(chunk.node.inputSfMData.value, "r") as json_file: + sfm_data = json.load(json_file) + #sort by frame id + if "frameId" in sfm_data["views"][0].keys(): + sfm_data["views"] = sorted(sfm_data["views"], key= lambda x:int(x["frameId"])) + else: + chunk.logger.info("No frameId") + images_uids = [v["viewId"] for v in sfm_data["views"]] + nb_view = len(images_uids) + oW = int(sfm_data["views"][0]["width"]) + oH = int(sfm_data["views"][0]["height"]) + + chunk.logger.info(("Image size (wh): %d %d"%(oW,oH))) + + warp_files = [f for f in os.listdir(chunk.node.matchFolder.value) if f.endswith("_warp.exr")] + + chunk.logger.info("%d warps found"%(len(warp_files))) + + warps = {view_id_0:{view_id_1:[] for view_id_1 in images_uids } for i, view_id_0 in enumerate(images_uids)} + + for warp_file in warp_files: + uid_0, uid_1 = warp_file.replace("_warp.exr","").split("_") + # chunk.logger.info(uid_0+" "+uid_1) + w,_=open_exr(os.path.join(chunk.node.matchFolder.value, warp_file)) + H,W = w.shape[0:2] + w[:,:,0]*=W + w[:,:,1]*=H + warps[uid_0][uid_1] = w + + WORKING_REZ = (864,864) + + def to_1d(v,x,y): + return v*WORKING_REZ[0]*WORKING_REZ[1]+y*WORKING_REZ[1]+x + def from_1d(i): + v=np.asarray(i/(WORKING_REZ[0]*WORKING_REZ[1])).astype(np.int32) + y=np.asarray((i/WORKING_REZ[1])%WORKING_REZ[1]).astype(np.int32) + x=i%WORKING_REZ[1] + return v,x,y + + pys,pxs = np.meshgrid(np.arange(0,WORKING_REZ[0]), np.arange(0,WORKING_REZ[1]), indexing='ij') + pys=pys.flatten() + pxs=pxs.flatten() + + if chunk.node.mode.value == "clique": + import networkx as nx + match_graph = nx.Graph() + match_graph.add_nodes_from(range(nb_view*WORKING_REZ[0]*WORKING_REZ[1])) + for i,uid_0 in enumerate(images_uids): + for j,uid_1 in enumerate(images_uids): + if (uid_0 != uid_1) and (uid_0 in warps) and (uid_1 in warps[uid_0]) and len(warps[uid_0][uid_1])!= 0: + chunk.logger.info("Adding matches from %d %d"%(i,j)) + w = warps[uid_0][uid_1].reshape([-1,3]).astype(np.int32) + if len(w) == 0: + continue + idx_0 = to_1d(i,pys,pxs) + idx_1 = to_1d(j,w[:,1], w[:,0]) + + # print(w[100:110,0:2]) + # print("--") + # print(np.stack([pys,pxs], axis=-1)[100:110,:]) + # print("--") + # print(np.stack([idx_0[100:110], idx_1[100:110]],axis=-1)) + # break + #add edges for each match FIXME: could add a window FIXME: add weight + match_graph.add_edges_from(np.stack([idx_0, idx_1], axis=-1)) + chunk.logger.info("Find cliques") + #cliques suggest strong matches + cliques = list(nx.find_cliques(match_graph)) + chunk.logger.info("Done, %d cliques found"%len(cliques)) + selected_matches = [] + MIN_TRACK_SIZE=3 + for cl in cliques: + if len(cl)>=MIN_TRACK_SIZE: + selected_matches.append(cl) + chunk.logger.info("found %d tracks with more than %d matches"%(len(selected_matches), MIN_TRACK_SIZE)) + elif chunk.node.mode.value == "greedy": + 
chunk.logger.info("Greedy mode computing...") + + #init tracks with pixel grid + all_selected_matches = [[to_1d(0,x,y)] for x,y in zip(pxs, pys)] + MIN_CONF = 0.9 + #for each frame + for i in range(0, len(images_uids)-1): + chunk.logger.info("%d tracks"%(len(all_selected_matches))) + chunk.logger.info("%d/%d"%(i, len(images_uids))) + #get uit a i and i+1 + uid_0=images_uids[i] + uid_1=images_uids[i+1] + #warps fro->i+1: from each px of i-1 coresp coordinate in i + w = warps[uid_0][uid_1].reshape([-1,3]) + + #select last features of each track (sequenial mode) + prev_feature_idx = np.asarray([sm[-1] for sm in all_selected_matches]) + prev_vs, prev_xs, prev_ys = from_1d(prev_feature_idx) + print("%d active tracks "%prev_xs.shape[0]) + + #x,y coord at i, warp i->i+1 on i, wx wy, matchingcoord in i+1 + assert(pys.shape[0]==w.shape[0]) + for y,x,wy,wx,c in zip(pys, pxs, w[:,0], w[:,1], w[:,2]): + if c>MIN_CONF: #FIXME: hardcoded? + wx=int(np.round(wx)) + wy=int(np.round(wy))#indx at i+1 + # print(wy,wx,y,x) + + next_feat_idx = to_1d(i+1,wx,wy) + #if there is an ongoing track at i, get matching index + j=np.nonzero((x==prev_xs) & (y==prev_ys) & (prev_vs==i)) + if len(j[0])>=1:# + # print("adding %d %d "%(wx,wy)+str(i+1)+" to %d %d "%(x,y)+str(all_selected_matches[j[0][0]])) + all_selected_matches[j[0][0]].append(next_feat_idx) + elif len(j[0])==0:#else create new trak + # print("Create") + all_selected_matches.append([next_feat_idx]) + + + + # #init tracks with pixel grid + # all_selected_matches = [[to_1d(0,x,y)] for x,y in zip(pxs, pys)] + # MIN_CONF = 0.9 + # #for each frame + # for i in range(1, len(images_uids)): + # chunk.logger.info("%d tracks"%(len(all_selected_matches))) + # chunk.logger.info("%d/%d"%(i, len(images_uids))) + # #get uit a i and i-1 + # uid_0=images_uids[i-1] + # uid_1=images_uids[i] + # #warps fro i-1->i: from each px of i-1 coresp coordinate in i + # w = warps[uid_0][uid_1].reshape([-1,3]) + # #select last feature of each track (sequenial) + # prev_feature_idx = np.asarray([sm[-1] for sm in all_selected_matches]) + # #for now sequential, only interested in i-1 + # prev_vs, prev_xs, prev_ys = from_1d(prev_feature_idx) + # print("%d active tracks "%prev_xs.shape[0]) + + # #for each warped pixex + # #x,y coord at i, warp i-1->i on i, + # for y,x,wy,wx,c in zip(pys, pxs, w[:,1], w[:,0], w[:,2]): + # if c>MIN_CONF: #FIXME: hardcoded? 
+ # wx=int(wx) + # wy=int(wy)# + # current_feat_idx = to_1d(i,x,y) + # #if there was an ongoing track at this pixel on i-1 + # j=np.nonzero((wx==prev_xs) & (wy==prev_ys) & (prev_vs==(i-1))) + # if len(j[0])>=1:# + # all_selected_matches[j[0][0]].append(current_feat_idx) + # elif len(j[0])==0:#else create new trak + # # print("Create") + # all_selected_matches.append([current_feat_idx]) + + + chunk.logger.info("Cleaning up one length traskcs from %d"%(len(all_selected_matches))) + selected_matches = [sm for sm in all_selected_matches if len(sm)>=2] + chunk.logger.info("%d remaining "%len(selected_matches)) + else: + raise RuntimeError("Invalid mode") + + chunk.logger.info("Tracks stats") + track_length={} + for sm in selected_matches: + if len(sm) in track_length.keys(): + track_length[len(sm)]+=1 + else: + track_length[len(sm)]=1 + for t in sorted(track_length.keys()): + chunk.logger.info("%d:%d"%(t, track_length[t])) + + # print(selected_matches) + + MAX_TRACK = 50000 + selected_matches = sorted(selected_matches, key=lambda x:len(x), reverse=True) + selected_matches=selected_matches[:MAX_TRACK] + + # print(selected_matches) + + chunk.logger.info("Tracks stats") + track_length={} + for sm in selected_matches: + if len(sm) in track_length.keys(): + track_length[len(sm)]+=1 + else: + track_length[len(sm)]=1 + for t in sorted(track_length.keys()): + chunk.logger.info("%d:%d"%(t, track_length[t])) + + chunk.logger.info("Writting %d tracks"%(len(selected_matches))) + #writing tracks + # os.makedirs(chunk.node.output.value, exist_ok=True) + tracks=[] + features_per_view = {} + sequ1d_to_feat_id = {} + clean_matches = { uid0: {uid1:[] for uid1 in images_uids} for uid0 in images_uids} + for i,matches in enumerate(selected_matches): + feats = [] + # print("Track %d:"%i) + for j,f in enumerate(matches): + view_index, x,y = from_1d(f) + # print(x,y) + v_uid = images_uids[view_index] + if not v_uid in features_per_view.keys(): + features_per_view[v_uid]=[] + f_uid = len(features_per_view[v_uid]) + sequ1d_to_feat_id[f]=f_uid + x= float(oH*x/WORKING_REZ[1]) + y= float(oW*y/WORKING_REZ[0]) + features_per_view[v_uid].append([x,y]) + feats.append([ int(v_uid), + {"featureId": int(f_uid), + "coords": [x,y], + "scale": float(0)} + ]) + if j>0: + pv_idx,_,_ = from_1d(matches[j-1])#previous feature in match + pv_uid = images_uids[pv_idx] #view uid of this feature + pf_uid = len(features_per_view[pv_uid])-1 #feature id in the view + clean_matches[pv_uid][v_uid].append([pf_uid, f_uid]) + + tracks.append([int(i), {"descType": chunk.node.describerTypes.value,"featPerView": feats}]) + + with open(chunk.node.outputTracks.value, "w") as tf: + json.dump(tracks, tf) + + #save features from only tracks + for v_uid in features_per_view: + kpts = features_per_view[v_uid] + with open(os.path.join(chunk.node.output.value,v_uid+"."+chunk.node.describerTypes.value+".feat"), "w") as kpf: + kpf.write("".join(["%f %f 0 0\n"%(kp[1], kp[0]) for kp in kpts ])) + + #save matches from only tracks, will only create match from a frame to the other + # for i,matches in enumerate(clean_matches): + # print(clean_matches) + with open(os.path.join(chunk.node.output.value,"0.matches.txt"), "a") as mf: + for uid0 in images_uids: + if uid0 in clean_matches.keys(): + for uid1 in images_uids: + if uid1 in clean_matches[uid0].keys(): + matches = clean_matches[uid0][uid1] + + if len(matches)>=1: + # print(matches) + num_kp = len(matches) + mf.write("%s %s\n"%(uid0, uid1)) + mf.write("1\n") + mf.write(chunk.node.describerTypes.value+" 
%d\n"%(num_kp))# + for m in matches:#save feature index with offset for each view + mf.write("%d %d\n"%(m[0], m[1])) + + chunk.logManager.end() + diff --git a/mrrs/deep_image_matching/DeepImageMatching.py b/mrrs/deep_image_matching/DeepImageMatching.py new file mode 100644 index 0000000..53d523d --- /dev/null +++ b/mrrs/deep_image_matching/DeepImageMatching.py @@ -0,0 +1,520 @@ +__version__ = "2.0" +import json +import os +from this import d + +from meshroom.core import desc + +from meshroom.core.plugin import PluginNode, EnvType + +MATCHER = [ + # Dense + "roma", "tiny-roma", "dust3r", "mast3r", + # Semi-dense + "loftr", "eloftr", "se2loftr", "aspanformer", "matchformer", "xfeat-star", + # Sparse + "sift-lg", "superpoint-lg", "disk-lg", "aliked-lg", "dedode-lg", "doghardnet-lg", "gim-lg", "xfeat-lg", + "dedode", "steerers", "dedode-kornia", "sift-nn", "orb-nn", "doghardnet-nn", "patch2pix", "superglue", + "r2d2", "d2net", "gim-dkm", "xfeat", "omniglue", "dedode-subpx", "xfeat-subpx", "aliked-subpx" + ] + + +class DeepImageMatching(PluginNode): + + category = 'MRRS - Deep Matching' + documentation = ''' ''' + gpu = desc.Level.INTENSIVE + + #overides the env path + envFile=os.path.join(os.path.dirname(__file__), 'env.yaml') + envType=EnvType.CONDA + + inputs = [ + desc.File( + name="inputSfMData", + label="SfMData", + description="Input SfMData file.", + value="", + ), + + # desc.File( + # name="input", + # label="inputPairs", + # description="Input image pairs", + # value="", + # ), + + desc.ChoiceParam( + name="matcher", + label="matcher", + description="matcher method", + value="roma", + values=MATCHER, + exclusive=True, + ), + + desc.ChoiceParam( + name="imageMatching", + label="imageMatching", + description="", + value="exhaustive_roundtrip", + values=["exhaustive", "exhaustive_roundtrip", "sequential", "star"], + exclusive=True, + ), + + desc.BoolParam( + name='rawMatches', + label='Save raw matches', + description='''''', + value=False, + advanced=True + ), + + desc.IntParam( + name='maxKeypoints', + label='maxKeypoints', + description='''''', + value=2048, + range=(0, 1000000000, 1), + advanced=True + ), + + # desc.BoolParam( + # name='removeInconsistantRawMatches', + # label='Remove Inconsistant Raw Matches', + # description='''''', + # value=False, + # advanced=True + # ), + + desc.BoolParam( + name='rawWarps', + label='Save raw warps', + description='''''', + value=False, + advanced=True + ), + + desc.ChoiceParam( + name="verboseLevel", + label="Verbose Level", + description="Verbosity level (fatal, error, warning, info, debug, trace).", + value="info", + values=["fatal", "error", "warning", "info", "debug", "trace"], + exclusive=True, + ) + ] + + outputs = [ + desc.File( + name="outputFolder", + label="Output Folder", + description="Path to a folder in which the computed results are stored.", + value=desc.Node.internalFolder, + visible=False + ), + desc.File( + name="imagePairs", + label="Image Pairs", + description="", + value=os.path.join(desc.Node.internalFolder, "imageMatches.txt"), + ), + desc.File( + name="featuresFolder", + label="Features Folder", + description="Path to a folder in which the features matches are stored.", + value=os.path.join(desc.Node.internalFolder, "features"), + ), + desc.File( + name="matchesFolder", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches"), + ), + desc.File( + name="rawFeaturesFolder", + label="Raw features Folder", + 
description="Path to a folder in which the features matches are stored.", + value=os.path.join(desc.Node.internalFolder, "features_raw"), + ), + desc.File( + name="rawMatchesFolder", + label="Raw matches Folder", + description="Path to a folder in which the raw computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches_raw"), + ) + ] + + def processChunk(self, chunk): + """ + Computes the different transforms + """ + import numpy as np + + chunk.logManager.start(chunk.node.verboseLevel.value) + + #load sfmdata + chunk.logger.info("Loading sfm data") + with open(chunk.node.inputSfMData.value, "r") as json_file: + sfm_data = json.load(json_file) + #sort by frame id + if "frameId" in sfm_data["views"][0].keys(): + sfm_data["views"] = sorted(sfm_data["views"], key= lambda x:int(x["frameId"])) + nb_image = len(sfm_data["views"]) + images_paths = [v["path"] for v in sfm_data["views"]] + images_uids = [v["viewId"] for v in sfm_data["views"]] + oW,oH=int(sfm_data["views"][0]["width"]),int(sfm_data["views"][0]["height"]) + + #making image list wih diferent strategies + if chunk.node.imageMatching.value == "exhaustive": + image_pairs = [(i, j) for i in range(nb_image) for j in range(i+1, nb_image)] + elif chunk.node.imageMatching.value == "sequential": + image_pairs = [(i, i+1) for i in range(nb_image-1)] + elif chunk.node.imageMatching.value == "exhaustive_roundtrip": + image_pairs = [] + for i in range(nb_image): + for j in range(nb_image): + if i!=j: + image_pairs.append((i,j)) + elif chunk.node.imageMatching.value == "star": + raise RuntimeError('not implemented yet') + STEP = 20 + for i in range(nb_image): + if i%STEP == 0: + pass + else: + pass + + + + chunk.logger.info(" %d images, %d pairs"%(nb_image, len(image_pairs))) + + # #opening imagematching file if any + # if chunk.node.inputPairs.value != "": + # print("Opening imagepairs file:") + # image_pairs=open_image_grapĥ(imagepairs, nb_image) + + #saving pairs again + with open(chunk.node.imagePairs.value , "w") as mf: + for view_index_0 in range(nb_image): + matched_views = [images_uids[p[1]] for p in image_pairs if p[0] == view_index_0] + if len(matched_views)>0: + mf.write(images_uids[view_index_0]) + for m in matched_views: + mf.write(" "+m) + mf.write("\n") + + # #opening masks if any + # masks = {} + # if maskfolder: + # from PIL import Image + # for view_id in all_view_ids: + # masks[view_id] = np.array(Image.open(os.path.join(maskfolder, view_id+".png")), dtype=np.bool_) + + #creates output folders + chunk.logger.info("Creating output folders") + os.makedirs(chunk.node.featuresFolder.value, exist_ok=True) + os.makedirs(chunk.node.rawFeaturesFolder.value, exist_ok=True) + os.makedirs(chunk.node.matchesFolder.value, exist_ok=True) + os.makedirs(chunk.node.rawMatchesFolder.value, exist_ok=True) + + if chunk.node.matcher.value == "roma": + #Load Model + chunk.logger.info("Loading model") + device = 'cuda' + + from romatch import roma_outdoor + matcher = roma_outdoor(device=device) + + #Loop over image pairs + chunk.logger.info("Running image matching for %d "%len(image_pairs)) + desc_type = "sift" + extention = "."+desc_type+".feat" #FIXME: for now we write as sift + nb_features = [0 for _ in range(nb_image)] + + for i, (view_index_0, view_index_1) in enumerate(image_pairs): + chunk.logger.info(" Matching :"+str(view_index_0)+" "+str(view_index_1)+"(%d/%d)"%(i,len(image_pairs))) + #working resolution + H,W = 864,864 + warp, certainty = matcher.match(images_paths[view_index_0], images_paths[view_index_1], 
device=device) + warp01 = warp[:, :W, :] + # warp10 = warp[:, W:, :] + certainty01 = certainty[:, :W] + # certainty10 = certainty[:, W:] + chunk.logger.info(" Done") + # Sample matches for estimation + chunk.logger.info(" Sampling for %d keypoints "%(chunk.node.maxKeypoints.value)) + matches01, matches_certainty01 = matcher.sample(warp01, certainty01, num=chunk.node.maxKeypoints.value) + # matches10, matches_certainty10 = matcher.sample(warp10, certainty10, num=chunk.node.maxKeypoints.value) + chunk.logger.info(" Done") + + # X = torch.round(((matches01[:, 0] * 864) + 863) / 2).to(torch.int32) + # Y = torch.round(((matches01[:, 1] * 864) + 863) / 2).to(torch.int32) + # X2 = torch.round(((matches01[:, 2] * 864) + 863) / 2).to(torch.int32) + # Y2 = torch.round(((matches01[:, 3] * 864) + 863) / 2).to(torch.int32) + # val = warp01[Y, X, 2] + # val = ((val * 864) + 863) / 2 + # print(torch.max(torch.abs(val - X2))) + # val = warp01[Y, X, 3] + # val = ((val * 864) + 863) / 2 + # print(torch.max(val - Y2)) + # exit(0) + + #convertions + #matches in px + matches01 = ((matches01* 864) + 863) / 2 + # matches10 = ((matches10* 864) + 863) / 2 + #warps in px + warp01= ((warp01[:,:,2:4]* 864+863)/2).detach().cpu().numpy() #0:1 is in original image? + # warp10 = ((warp10[:,:,2:4]* 864+863)/2).detach().cpu().numpy() + matches01=matches01.detach().cpu().numpy() + # matches10=matches10.detach().cpu().numpy() + certainty01=certainty01.detach().cpu().numpy() + # certainty10=certainty10.detach().cpu().numpy() + + #sanity check + X = np.round(matches01[:, 0]).astype(np.int32) + Y = np.round(matches01[:, 1]).astype(np.int32) + X2 = np.round(matches01[:, 2]).astype(np.int32) + Y2 = np.round(matches01[:, 3]).astype(np.int32) + val = warp01[Y, X,0] + max_dist_x=(np.max(np.abs(val - X2))) + val = warp01[Y, X,1] + max_dist_y=(np.max(np.abs(val - Y2))) + print(max_dist_x) + print(max_dist_y) + assert(max_dist_x<1) + assert(max_dist_y<1) + + #write sampled/filterer matches + kpts01_0 = matches01[:,0:2] + kpts01_1 = matches01[:,2:4] + #put it in original image coordinates for meshroom + kpts01_0[:,0] = oW*(kpts01_0[:,0]/W) + kpts01_0[:,1] = oH*(kpts01_0[:,1]/H) + kpts01_1[:,0] = oW*(kpts01_1[:,0]/W) + kpts01_1[:,1] = oH*(kpts01_1[:,1]/H) + num_kp = kpts01_0.shape[0] + chunk.logger.info(" Saving %d matches"%num_kp) + with open(os.path.join(chunk.node.featuresFolder.value,images_uids[view_index_0]+extention), "a") as kpf: + for kp in kpts01_0: + kpf.write("%f %f 0 0\n"%(kp[0], kp[1])) + with open(os.path.join(chunk.node.featuresFolder.value,images_uids[view_index_1]+extention), "a") as kpf: + for kp in kpts01_1: + kpf.write("%f %f 0 0\n"%(kp[0], kp[1])) + with open(os.path.join(chunk.node.matchesFolder.value,"0.matches.txt"), "a") as mf: + mf.write("%s %s\n"%(images_uids[view_index_0], images_uids[view_index_1])) + mf.write("1\n") + mf.write("sift %d\n"%(num_kp)) + for match_indx in range(num_kp): + kp0_idx = match_indx+nb_features[view_index_0] + kp1_idx = match_indx+nb_features[view_index_1] + mf.write("%d %d\n"%(kp0_idx, kp1_idx)) + #update index offset + nb_features[view_index_0]+=num_kp + nb_features[view_index_1]+=num_kp + chunk.logger.info(" Done") + + #saving warp image between 0-1 + if chunk.node.rawWarps.value : + import OpenEXR#lazy import + chunk.logger.info(" Saving raw matches images") + + warppath01=os.path.join(chunk.node.rawMatchesFolder.value, images_uids[view_index_0]+"_"+images_uids[view_index_1]+"_warp.exr") + # warppath10=os.path.join(chunk.node.rawMatchesFolder.value, 
images_uids[view_index_1]+"_"+images_uids[view_index_0]+"_warp.exr") + def save_exr(image, path): + h={"compression": OpenEXR.ZIP_COMPRESSION, "type":OpenEXR.scanlineimage} + c={"RGB": image} + with OpenEXR.File(h,c) as image: + image.write(path) + save_exr(np.concatenate([warp01/H, np.expand_dims(certainty01, axis=-1)], axis=-1), warppath01 ) + # save_exr(np.concatenate([warp10/H, np.expand_dims(certainty10, axis=-1)], axis=-1), warppath10 ) + chunk.logger.info(" Done") + + #old code + # if chunk.node.matcher.value == "roma": #if dense + # W = int(matcher._certainty.shape[1]/2) #width after resize + # H = matcher._certainty.shape[0] + # chunk.logger.info(" Output certainty image size %d %d "%(W,H)) + # chunk.logger.info(" Output warp image size %d %d "%(W,H)) + + # ##write raw matches with confidence + # if chunk.node.rawMatches.value or chunk.node.rawWarps.value : + # chunk.logger.info(" Saving %d raw matches working rez (%d, %d), for image res (%d,%d)"%(H*W, H, W, oH, oW)) + # #warp_021 specifies im1 pixel locations x and y, which are used to interpolate the im0 values, between 0 and 1 + # warp_021 = (matcher._warp[:,:W, 2:]+1)/2#pass from -1 1 to 0 1 + # certainty_01 = matcher._certainty[:, :W].cpu().detach().numpy() + # #turn into px coordinates (original image size), NOTE: also pass xy=>yx + # warp_021_px = torch.stack([torch.clamp(oH*warp_021[:,:,1], 0, oH-1), + # torch.clamp(oW*warp_021[:,:,0], 0, oW-1)], axis=-1).int() + # # + # warp_120 = (matcher._warp[:, W:,:2]+1)/2 + # warp_120_px = torch.stack([torch.clamp(H*warp_120[:,:,0], 0, H-1), + # torch.clamp(W*warp_120[:,:,1], 0, W-1)], axis=-1).int() + + # certainty_10 = matcher._certainty[:, W:].cpu().detach().numpy() + + # chunk.logger.info(" Median certainty: %f "%np.median(certainty_01.flatten())) + + # if chunk.node.rawWarps.value : + # import OpenEXR#lazy import + # chunk.logger.info(" Saving raw matches images") + # warppath10=os.path.join(chunk.node.rawMatchesFolder.value, images_uids[view_index_1]+"_"+images_uids[view_index_0]+"_warp") + # warppath01=os.path.join(chunk.node.rawMatchesFolder.value, images_uids[view_index_0]+"_"+images_uids[view_index_1]+"_warp") + # confpath10 = os.path.join(chunk.node.rawMatchesFolder.value, "conf_"+images_uids[view_index_1]+"_"+images_uids[view_index_0]+".exr") + # confpath01 = os.path.join(chunk.node.rawMatchesFolder.value, "conf_"+images_uids[view_index_0]+"_"+images_uids[view_index_1]+".exr") + # warp_021_px_smoll = torch.stack([torch.clamp(H*warp_021[:,:,1], 0, H-1), + # torch.clamp(W*warp_021[:,:,0], 0, W-1)], axis=-1).cpu().detach().numpy() + # warp_012_px_smoll = torch.stack([torch.clamp(H*warp_120[:,:,1], 0, H-1), + # torch.clamp(W*warp_120[:,:,0], 0, W-1)], axis=-1).cpu().detach().numpy() + # h={"compression": OpenEXR.ZIP_COMPRESSION, "type":OpenEXR.scanlineimage} + # c={"RGB":np.concatenate([warp_021_px_smoll/H, np.expand_dims(certainty_01, axis=-1)], axis=-1)} + # with OpenEXR.File(h,c) as image: + # image.write(warppath10+".exr") + # c={"RGB":np.concatenate([warp_012_px_smoll/H, np.expand_dims(certainty_10, axis=-1)], axis=-1)} + # with OpenEXR.File(h,c) as image: + # image.write(warppath01+".exr") + # # c={"Y":warp_021_px_smoll[:,:,0]/H} + # # with OpenEXR.File(h,c) as image: + # # image.write(warppath10+"_x.exr") + # # c={"Y":warp_021_px_smoll[:,:,1]/W}S + # # with OpenEXR.File(h,c) as image: + # # image.write(warppath10+"_y.exr") + # # c={"Y":warp_012_px_smoll[:,:,0]/H} + # # with OpenEXR.File(h,c) as image: + # # image.write(warppath01+"_x.exr") + # # 
c={"Y":warp_012_px_smoll[:,:,1]/W} + # # with OpenEXR.File(h,c) as image: + # # image.write(warppath01+"_y.exr") + # # c={"Y":certainty_01} + # # with OpenEXR.File(h,c) as image: + # # image.write(confpath10) + # # c={"Y":certainty_10} + # # with OpenEXR.File(h,c) as image: + # # image.write(confpath01) + + # #warpdebug + # import torchvision + # # warppathviz01 = os.path.join(chunk.node.rawMatchesFolder.value, "warpviz_"+images_uids[view_index_0]+"_"+images_uids[view_index_1]+".png") + # # warppathviz10 = os.path.join(chunk.node.rawMatchesFolder.value, "warpviz_"+images_uids[view_index_1]+"_"+images_uids[view_index_0]+".png") + # # confvizpath0 = os.path.join(chunk.node.rawMatchesFolder.value, "conf_"+images_uids[view_index_1]+".png") + # # confvizpath1 = os.path.join(chunk.node.rawMatchesFolder.value, "conf_"+images_uids[view_index_0]+".png") + # # torchvision.utils.save_image(torch.from_numpy(certainty_01), confvizpath0) + # # torchvision.utils.save_image(torch.from_numpy(certainty_10), confvizpath1) + # # #resize the input image to W, H, because it could have been resized + # # warped_im1 = nimg1[:, warp_021_px_smoll[:,:,0], warp_021_px_smoll[:,:,1]] #warp is x y?? + # # warped_im0 = nimg0[:, warp_012_px_smoll[:,:,0], warp_012_px_smoll[:,:,1]] + # print(os.path.join(chunk.node.rawMatchesFolder.value,images_uids[view_index_0]+".png")) + # nimg0=torch.nn.functional.interpolate(torch.unsqueeze(img0, axis =0),size=(H,W), align_corners=False,mode="bilinear", )[0] + # nimg1=torch.nn.functional.interpolate(torch.unsqueeze(img1, axis =0),size=(H,W), align_corners=False,mode="bilinear", )[0] + # torchvision.utils.save_image(nimg0, os.path.join(chunk.node.rawMatchesFolder.value,images_uids[view_index_0]+".png")) + # torchvision.utils.save_image(nimg1, os.path.join(chunk.node.rawMatchesFolder.value,images_uids[view_index_1]+".png")) + # # torchvision.utils.save_image(warped_im0, warppathviz01) + # # torchvision.utils.save_image(warped_im1, warppathviz10) + + # if chunk.node.rawMatches.value : + # if i == 0: #making dense kps files once + # # summary(matcher, img0, img1) + # chunk.logger.info(" Initializing dense keypoint files") + # kpts0_y,kpts0_x = torch.meshgrid((torch.arange(0, oH), torch.arange(0, oW)), indexing="ij") + # kpts0 = torch.stack([kpts0_y.flatten(), kpts0_x.flatten()], axis=-1) + # chunk.logger.info(" Creating dummy %d kp"%kpts0_x.flatten().shape[0]) + # kp0_string = "".join(["%f %f 0 0\n"%(kp[1], kp[0]) for kp in kpts0 ]) + + # chunk.logger.info(" Saving files") + # for j, uid in enumerate(images_uids): + # chunk.logger.info(" %d/%d "%(j,len(images_uids))) + # with open(os.path.join(chunk.node.rawFeaturesFolder.value,uid+extention), "w") as kpf: + # kpf.write(kp0_string) + # # with open(os.path.join(chunk.node.rawFeaturesFolder.value,uid+"."+desc_type+".desc"), "wb") as df: + # # df.write(d_string) + + # #meshrif in working res H W, but coordinates in original res oW oH + # kpts0_y,kpts0_x = np.meshgrid(np.round(oH*np.arange(0,1,1.0/H)), + # np.round(oW*np.arange(0,1,1.0/W)), + # indexing='ij') + + # kpts0_indices = kpts0_y.flatten()*oW+kpts0_x.flatten() + # #keypoint in image 1 from waprs + # kpts1=warp_021_px.reshape([-1,2]).cpu().detach().numpy() + # kpts1_indices=kpts1[:,0]*oW+kpts1[:,1] + # #free sanity check + # if kpts0_indices.shape[0] != kpts1_indices.shape[0] != certainty_01.flatten().cpu().shape[0]: + # raise RuntimeError("Invalid keypoints shape") + # if np.any(kpts0_indices>=oH*oW) or np.any(kpts1_indices>=oH*oW): + # raise RuntimeError("Keypoint outside of image") 
+ + # certainty_01=certainty_01.flatten() + + # if chunk.node.removeInconsistantRawMatches.value: + # py,px = np.meshgrid(np.arange(0,H), np.arange(0,W), indexing='ij') + # warp_021_px_smoll = torch.stack([torch.clamp(H*warp_021[:,:,1], 0, H-1), + # torch.clamp(W*warp_021[:,:,0], 0, W-1)], axis=-1).cpu().detach().numpy() + # warp_120_px_smoll = torch.stack([torch.clamp(H*warp_120[:,:,1], 0, H-1), + # torch.clamp(W*warp_120[:,:,0], 0, W-1)], axis=-1).cpu().detach().numpy() + # warp_021_px_smoll = np.round(warp_021_px_smoll).astype(np.int32) + # warp_120_px_smoll = np.round(warp_120_px_smoll).astype(np.int32) + # dist=np.sum((warp_021_px_smoll[warp_120_px_smoll[:,:,0], warp_120_px_smoll[:,:,1], :]-np.stack([py,px], axis=-1))**2,axis=-1).flatten() + # # print(dist) + # THRESHOLD=2 + # valid=dist=THRESHOLD))) + # kpts0_indices=kpts0_indices[valid] + # kpts1_indices=kpts1_indices[valid] + # certainty_01=certainty_01[valid] + + + # #writting keypoints as meshroom format + # os.makedirs(chunk.node.rawMatchesFolder.value, exist_ok=True) + # with open(os.path.join(chunk.node.rawMatchesFolder.value,"0.matches.txt"), "a") as mf: + # mf.write("%s %s\n"%(images_uids[view_index_0], images_uids[view_index_1])) + # mf.write("1\n") + # mf.write("sift %d\n"%(kpts1_indices.shape[0]))#for now we disguise as sift + # #FIXME: cetrainty with kpts0 inside? + # for kp0_idx,kp1_idx,c in zip(kpts0_indices, kpts1_indices, certainty_01): + # mf.write("%d %d %f\n"%(kp0_idx, kp1_idx, c)) + else: #other sparse descriptors in the lib + chunk.logger.info("Matching with other matcher") + from matching import get_matcher + matcher = get_matcher(chunk.node.matcher.value, device=device) + chunk.logger.info(" Load images "+images_paths[view_index_0]+" "+images_paths[view_index_1]) + img0 = matcher.load_image(images_paths[view_index_0], resize=None) + img1 = matcher.load_image(images_paths[view_index_1], resize=None) + oW = img0.shape[2] + oH = img0.shape[1] + chunk.logger.info(" Image size %d %d "%(oW,oH)) + chunk.logger.info(" Computing matches...") + result = matcher(img0, img1) + + num_kp, H, kpts0, kpts1 = result['num_inliers'], result['H'], result['inlier_kpts0'], result['inlier_kpts1'] + # write all keypoints in image 0 and 1 + # all matcher keypoint (y,x) + kpts0=result['matched_kpts0'] + kpts1=result['matched_kpts1'] + num_kp = result['matched_kpts0'].shape[0] + with open(os.path.join(chunk.node.featuresFolder.value,images_uids[view_index_0]+extention), "a") as kpf: + for kp in kpts0: + kpf.write("%f %f 0 0\n"%(kp[0], kp[1])) + with open(os.path.join(chunk.node.featuresFolder.value,images_uids[view_index_1]+extention), "a") as kpf: + for kp in kpts1: + kpf.write("%f %f 0 0\n"%(kp[0], kp[1])) + #write matches + #Write matches, note "0." 
beacause mewhroom suports several matches files for batching + with open(os.path.join(chunk.node.matchesFolder.value,"0.matches.txt"), "a") as mf: + mf.write("%s %s\n"%(images_uids[view_index_0], images_uids[view_index_1])) + mf.write("1\n") + mf.write("sift %d\n"%(num_kp))#for now we disguise as sift + for match_indx in range(num_kp):#save feature index with offset for each view + kp0_idx = match_indx+nb_features[view_index_0] + kp1_idx = match_indx+nb_features[view_index_1] + mf.write("%d %d\n"%(kp0_idx, kp1_idx)) + #update index offset + nb_features[view_index_0]+=num_kp + nb_features[view_index_1]+=num_kp + print("Done") + + chunk.logManager.end() + + diff --git a/mrrs/deep_image_matching/GeometricFilterMatch.py b/mrrs/deep_image_matching/GeometricFilterMatch.py new file mode 100644 index 0000000..41eb9f9 --- /dev/null +++ b/mrrs/deep_image_matching/GeometricFilterMatch.py @@ -0,0 +1,227 @@ +__version__ = "3.0" + +from crypt import methods +import os + +from meshroom.core.plugin import PluginNode, EnvType +from meshroom.core import desc + +class GeometricFilterMatch(PluginNode): + + category = 'MRRS - Deep Matching' + documentation = '''''' + + envFile=os.path.join(os.path.dirname(__file__), 'minenv.yaml') + envType=EnvType.CONDA + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', + value='' + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="" + ), + + desc.File( + name="matcheFolder", + label="Match Folder", + description="", + value="" + ), + + desc.File( + name="rawMatcheFolder", + label="Raw Match Folder", + description="", + value="" + ), + + desc.File( + name="rawFeatureFolder", + label="raw feature Folder", + description="Featurefolder", + value="" + ), + + desc.ChoiceParam( + name='method', + label='method', + description='', + value='roundtrip', + values=['roundtrip', 'fundamental'], + exclusive=True, + joinChar=',', + ), + + desc.FloatParam( + name='errorThreshold', + label='errorThreshold', + description=''' ''', + value=1.0, + range=(0.0, 1000000000.0, 0.1), + advanced=True + ), + + desc.BoolParam( + name='saveConfidence', + label='saveConfidence', + description=''' ''', + value=True, + advanced=True + ), + + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + + ), + + ] + + outputs = [ + desc.File( + name='outputFolder', + label='outputFolder', + description='outputFolder', + value=desc.Node.internalFolder, + group='', + ), + desc.File( + name="outputMatchesFolders", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches"), + ), + ] + + def open_matches_from_warp(): + + + + def processChunk(self, chunk): + """ + """ + import numpy as np + from mrrs.core.ios import open_matches + from mrrs.core.utils import time_it + from mrrs.core.geometry import make_homogeneous + import cv2 + from cv2 import findFundamentalMat + chunk.logManager.start(chunk.node.verboseLevel.value) + + chunk.logger.info("Opening files") + matches={} + WORKING_RES = 864 + with time_it() as t: + match_file = [os.path.join(chunk.node.matcheFolder.value, mf) for mf in os.listdir(chunk.node.matcheFolder.value) if mf.endswith(".txt")][0] + # match_file_raw = [os.path.join(chunk.node.rawMatcheFolder.value, mf) 
for mf in os.listdir(chunk.node.rawMatcheFolder.value) if mf.endswith(".txt")][0] + chunk.logger.info('Opening matches from '+match_file+' and '+match_file_raw ) + matches, images_uids = open_matches(match_file) + # raw_matches, images_uids = open_matches(match_file_raw) + + chunk.logger.info('Opening feature files') + features = {uid:np.loadtxt(os.path.join(chunk.node.featureFolder.value, uid+".sift.feat")) for uid in images_uids } + # features_raw = {uid:np.loadtxt(os.path.join(chunk.node.rawFeatureFolder.value, uid+".sift.feat")) for uid in images_uids } + + features_raw = np.meshgrid(np.round(oH*np.arange(0,1,1.0/H)), + np.round(oW*np.arange(0,1,1.0/W)), + indexing='ij') + raw_matches, features_raw = open_matches_from_warp() + chunk.logger.info("Done in %fs"%t) + + #for each pair of images + if 'fundamental' in chunk.node.method.value: + clean_matches= {view_id_0:{view_id_1:[] for view_id_1 in images_uids } for i, view_id_0 in enumerate(images_uids)} + for i, view_id_0 in enumerate(matches.keys()): + chunk.logger.info("%d/%d"%(i, len(matches.keys()))) + for j,view_id_1 in enumerate(matches[str(int(view_id_0))].keys()): + chunk.logger.info(" %d/%d"%(j, len(matches[str(int(view_id_0))].keys()))) + #find funcdamentaly matrix for filtered matches + matches01 = matches[str(int(view_id_0))][str(int(view_id_1))] + kp_idx_0 = matches01[:,0].astype(np.int32) + kp_idx_1 = matches01[:,1].astype(np.int32) + keypoints_0 = features[view_id_0][kp_idx_0,0:2] + keypoints_1 = features[view_id_1][kp_idx_1,0:2] + F, _ = findFundamentalMat(keypoints_0, keypoints_1, cv2.RANSAC) + import cv2 + from cv2 import findFundamentalMat + chunk.logger.info(" F:") + chunk.logger.info(F) + # filter raw matches outliers + raw_matches01 = raw_matches[str(int(view_id_0))][str(int(view_id_1))] + raw_kp_idx_0=raw_matches01[:,0].astype(np.int32) + raw_kp_idx_1=raw_matches01[:,1].astype(np.int32) + raw_keypoints_0 = features_raw[view_id_0][raw_kp_idx_0,0:2] + raw_keypoints_1 = features_raw[view_id_1][raw_kp_idx_1,0:2] + # compute epipolar error + #epipolar lines + l1 = make_homogeneous(raw_keypoints_0)@F.T + l0 = make_homogeneous(raw_keypoints_1)@F + #distance of each match point to line + def distance_point_to_line(p,l): + #from each point in p [N*3], compute the reprojection error from the epipolar line l [N*3] + return np.abs(np.sum(l*make_homogeneous(p), axis=1))/np.sqrt(np.sum(np.power(l[:,0:2],2), axis=1)) + error = (distance_point_to_line(raw_keypoints_0, l0)+distance_point_to_line(raw_keypoints_1, l1))/2.0 + + chunk.logger.info(" Mean/Med %f , %f"%(np.mean(error), np.median(error))) + + THRESHOLD= chunk.node.errorThreshold.value + match_selection = raw_matches01[error Date: Tue, 28 Jan 2025 11:55:10 +0100 Subject: [PATCH 32/32] added nodes to prune matches and create tracks --- mrrs/deep_feature_matching/MatchPruning.py | 154 ++++++++++++++++++ mrrs/deep_feature_matching/TrackCreation.py | 136 ++++++++++++++++ mrrs/deep_feature_matching/VizFeatures.py | 82 ++++++---- mrrs/deep_feature_matching/VizTracks.py | 9 +- mrrs/deep_feature_matching/env.yaml | 1 - .../kornia_wrappers/__init__.py | 0 .../kornia_wrappers/utils.py | 67 -------- 7 files changed, 338 insertions(+), 111 deletions(-) create mode 100644 mrrs/deep_feature_matching/MatchPruning.py create mode 100644 mrrs/deep_feature_matching/TrackCreation.py create mode 100644 mrrs/deep_feature_matching/kornia_wrappers/__init__.py diff --git a/mrrs/deep_feature_matching/MatchPruning.py b/mrrs/deep_feature_matching/MatchPruning.py new file mode 100644 index 
0000000..42e0983 --- /dev/null +++ b/mrrs/deep_feature_matching/MatchPruning.py @@ -0,0 +1,154 @@ +__version__ = "3.0" + +import os + +from meshroom.core.plugin import PluginNode, EnvType +from meshroom.core import desc + +class MatchPruning(PluginNode): + + category = 'MRRS - Deep Matching' + documentation = '''''' + + envFile=os.path.join(os.path.dirname(__file__), 'vizenv.yaml') + envType=EnvType.CONDA + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', + value='' + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="" + ), + + desc.File( + name="matcheFolder", + label="Match Folder", + description="Featurefolder", + value="", + + ), + + desc.IntParam( + name="minMatches", + label="minMatches", + description="Minimum matches to keep for a single view", + range=(1,10000000,1), + value=0 + ), + + desc.FloatParam( + name="confThreshold", + label="confThreshold", + description="Confidence threshold used to keep matches whatsoever", + range=(0.0,1.0,0.01), + value=0.0 + ), + + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + + ), + + ] + + outputs = [ + desc.File( + name='outputFolder', + label='outputFolder', + description='outputFolder', + value=desc.Node.internalFolder, + group='', + ), + desc.File( + name="outputMatchesFolders", + label="Matches Folder", + description="Path to a folder in which the computed matches are stored.", + value=os.path.join(desc.Node.internalFolder, "matches"), + ), + ] + + def processChunk(self, chunk): + """ + """ + import numpy as np + from mrrs.core.ios import open_matches + from mrrs.core.utils import time_it + chunk.logManager.start(chunk.node.verboseLevel.value) + + chunk.logger.info("Opening files") + matches={} + with time_it() as t: + feature_files = os.listdir(chunk.node.featureFolder.value) + print("%d feature files detected"%len(feature_files)) + match_file = [os.path.join(chunk.node.matcheFolder.value, mf) for mf in os.listdir(chunk.node.matcheFolder.value) if mf.endswith(".txt")][0] + chunk.logger.info('Opening matches from '+match_file) + matches_flatten, images_uids, total_nb_match_per_view = open_matches(match_file, flatten=True) + print(matches_flatten.shape) + print(t) + + chunk.logger.info("Pruning") + def prune(matches_flatten, total_nb_match_per_view): + i = 0 + CONF_TH = chunk.node.confThreshold.value + MIN_MATCH = chunk.node.minMatches.value + nb_invalid_matchs=np.count_nonzero(matches_flatten[:,-1] MIN_MATCH) and (total_nb_match_per_view[view_id_1] > MIN_MATCH): + continue + to_keep.append(i) + nb_to_rm = matches_flatten.shape[0]-len(to_keep) + chunk.logger.info("Removing %d weak and redondant matches, keeping %d"%(nb_to_rm, len(to_keep))) + matches_flatten=matches_flatten[to_keep,:] + return matches_flatten + + matches_flatten = prune(matches_flatten, total_nb_match_per_view) + print(matches_flatten.shape) + print("Writting") + #writting remaining matches + out_matches_folder = chunk.node.outputMatchesFolders.value + + #reshaping into dic of dic of list + clean_matches= {view_id_0:{view_id_1:[] for view_id_1 in images_uids } for i, view_id_0 in enumerate(images_uids)} + for m in matches_flatten: + clean_matches[str(int(m[0]))][str(int(m[1]))].append(m[2:4]) + + #FIXME: call fc + os.makedirs(out_matches_folder, exist_ok=True) + with 
open(os.path.join(out_matches_folder,"0.matches.txt"), "a") as mf: + for view_id_0 in clean_matches.keys(): + for view_id_1 in list(clean_matches.keys()): + ms = clean_matches[str(int(view_id_0))][str(int(view_id_1))] + if len(ms) == 0:#skipping views without matches + continue + mf.write("%s %s\n"%(view_id_0, view_id_1)) + mf.write("1\n") + mf.write("sift %d\n"%(len(ms)))#for now we disguise as sift + for kp0_idx,kp1_idx in ms: + mf.write("%d %d\n"%(kp0_idx, kp1_idx)) + + chunk.logManager.end() + diff --git a/mrrs/deep_feature_matching/TrackCreation.py b/mrrs/deep_feature_matching/TrackCreation.py new file mode 100644 index 0000000..02ac19f --- /dev/null +++ b/mrrs/deep_feature_matching/TrackCreation.py @@ -0,0 +1,136 @@ +__version__ = "3.0" + +import os + +from meshroom.core.plugin import PluginNode, EnvType +from meshroom.core import desc + +class TrackCreation(PluginNode): + + category = 'MRRS - Deep Matching' + documentation = '''''' + + envFile=os.path.join(os.path.dirname(__file__), 'vizenv.yaml') + envType=EnvType.CONDA + + inputs = [ + desc.File( + name='inputSfM', + label='SfMData', + description='SfMData file.', + value='' + ), + + desc.File( + name="featureFolder", + label="Feature Folder", + description="Featurefolder", + value="" + ), + + desc.File( + name="matcheFolder", + label="Match Folder", + description="Featurefolder", + value="", + + ), + + desc.IntParam( + name="minTracks", + label="minTracks", + description="Minimum number of tracks for a single view", + range=(1,10000000,1), + value=0 + ), + + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='''verbosity level (fatal, error, warning, info, debug, trace).''', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + + ), + + ] + + outputs = [ + desc.File( + name='tracksFile', + label='tracksFile', + description='tracksFile', + value=os.path.join(desc.Node.internalFolder,"tracksFile.json"), + group='', + ) + ] + + def processChunk(self, chunk): + """ + """ + import numpy as np + from mrrs.core.ios import open_matches + from mrrs.core.utils import time_it + import json + chunk.logManager.start(chunk.node.verboseLevel.value) + + # sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) + + chunk.logger.info("Opening files") + with time_it() as t: + feature_files = os.listdir(chunk.node.featureFolder.value) + print("%d feature files detected"%len(feature_files)) + match_file = [os.path.join(chunk.node.matcheFolder.value, mf) for mf in os.listdir(chunk.node.matcheFolder.value) if mf.endswith(".txt")][0] + chunk.logger.info('Opening matches from '+match_file) + matches_flatten, images_uids, total_nb_match_per_view = open_matches(match_file, flatten=True) + + chunk.logger.info("Creating tracks") + + import networkx as nx + from networkx.utils import UnionFind + + def flat_matches_to_keys(m): + """note: we always save all dense featues in feature files, soindex is static. 
+ key is uid_view+"_"+index_match + """ + return [str(int(m[0]))+"_"+str(int(m[2])), str(int(m[1]))+"_"+str(int(m[2]))] + + def add_edges_and_union(matches_flatten, union_find): + """Add edges to the graph based on pairwise matches and perform union operations.""" + for m in matches_flatten: + k1, k2 = flat_matches_to_keys(m) + # Perform union in Union-Find + #ie => will happen to set with k1 or k2, or will make new set + union_find.union(k1, k2) + + + #NOTE: no weigts for root, issue=weigts are for nodes=>feaures, not maches + + # Initialize graph and union-find structure + graph = nx.Graph() + #generate unique feature keys + #all_features_keys = [flat_matches_to_keys(m) for m in matches_flatten].reshape(-1) + + #union find object + union_find = UnionFind() + + # Add edges and perform union operations based on pairwise matches, from most confidence to least + add_edges_and_union(matches_flatten[::-1], graph, union_find) + + # Retrieve connected components + tracks = list(union_find.to_sets()) + + #save tracks + track_data=[] + for i, track in enumerate(tracks): + + for feature in track: + + import json + with open(chunk.node.tracksFile.value, 'w') as f: + json.dump(data, f) + + chunk.logManager.end() + + \ No newline at end of file diff --git a/mrrs/deep_feature_matching/VizFeatures.py b/mrrs/deep_feature_matching/VizFeatures.py index c362625..59d2770 100644 --- a/mrrs/deep_feature_matching/VizFeatures.py +++ b/mrrs/deep_feature_matching/VizFeatures.py @@ -10,7 +10,7 @@ class VizFeatures(PluginNode): category = 'MRRS - Deep Matching' documentation = '''''' - envFile=os.path.dirname(__file__), 'env.yaml' + envFile=os.path.join(os.path.dirname(__file__), 'vizenv.yaml') envType=EnvType.CONDA inputs = [ @@ -18,16 +18,14 @@ class VizFeatures(PluginNode): name='inputSfM', label='SfMData', description='SfMData file.', - value='', - uid=[0], + value='' ), desc.File( name="featureFolder", label="Feature Folder", description="Featurefolder", - value="", - uid=[0], + value="" ), desc.File( @@ -35,7 +33,7 @@ class VizFeatures(PluginNode): label="Match Folder", description="Featurefolder", value="", - uid=[0], + ), desc.IntParam( @@ -44,7 +42,7 @@ class VizFeatures(PluginNode): description="Only display first n matches", range=(1,1000,1), value=0, - uid=[0], + ), desc.BoolParam( @@ -52,7 +50,7 @@ class VizFeatures(PluginNode): label="matchOnly", description="Only display the matches", value=True, - uid=[0], + ), desc.IntParam( @@ -61,7 +59,7 @@ class VizFeatures(PluginNode): description="marker wize /2", range=(1,1000,1), value=1, - uid=[0], + ), @@ -72,7 +70,7 @@ class VizFeatures(PluginNode): value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, - uid=[0], + ), ] @@ -83,7 +81,7 @@ class VizFeatures(PluginNode): label='outputFolder', description='outputFolder', value=desc.Node.internalFolder, - uid=[], + group='', ), desc.File( @@ -92,7 +90,7 @@ class VizFeatures(PluginNode): description='featureViz', semantic='image', value=os.path.join(desc.Node.internalFolder, 'features_.png'), - uid=[], + group='', ), desc.File( @@ -101,7 +99,7 @@ class VizFeatures(PluginNode): description='matchingViz', semantic='image', value=os.path.join(desc.Node.internalFolder, 'matches_.png'), - uid=[], + group='', ), ] @@ -112,12 +110,11 @@ def processChunk(self, chunk): import numpy as np import json import cv2 - from mrrs.core.ios import open_image, save_image - from mrrs.deep_feature_matching.kornia_wrappers.utils import open_matches - - def draw_keypoints(image, keypoints, 
downsample=1, p = 2, o = 0): + from mrrs.core.ios import open_image, save_image, open_matches + + def draw_keypoints(image, keypoints, downsample=1, p = 2, o = 0, cols=[0,255,0]): for kp in keypoints[::downsample]: - image[int(kp[1])-p:int(kp[1])+p, o+int(kp[0])-p:o+int(kp[0])+p, :]=[0,255,0] + image[int(kp[1])-p:int(kp[1])+p, o+int(kp[0])-p:o+int(kp[0])+p, :]=cols return image def get_best_matching_view(view_matches): @@ -129,7 +126,7 @@ def get_best_matching_view(view_matches): chunk.logManager.start(chunk.node.verboseLevel.value) if chunk.node.inputSfM.value == '': - raise RuntimeError("No inputSfM specified") + chunk.logger.warning("No inputSfM specified") sfm_data=json.load(open(chunk.node.inputSfM.value,"r")) feature_files = os.listdir(chunk.node.featureFolder.value) @@ -150,49 +147,62 @@ def get_best_matching_view(view_matches): chunk.logger.info('Displaying Matching') match_file = [os.path.join(chunk.node.matcheFolder.value, mf) for mf in os.listdir(chunk.node.matcheFolder.value) if mf.endswith(".txt")][0] chunk.logger.info('Opening matches') - matches = open_matches(match_file) + matches,_ = open_matches(match_file) chunk.logger.info('Done open') - for view_id_0 in matches.keys(): - chunk.logger.info('Matching for view '+view_id_0) + for i, view_id_0 in enumerate(matches.keys()): + chunk.logger.info('%d/%d'%(i, len(matches.keys()))+' Matching for view '+view_id_0) #for now, only select the best matched view (the one with most matches) # view_id_1, matches_0_to_1=get_best_matching_view(matches[view_id_0]) # chunk.logger.info('Best matcing for view '+view_id_0+" is "+view_id_1+ " (%d matches)"%len(matches_0_to_1)) for view_id_1 in matches[view_id_0].keys(): matches_0_to_1 = matches[view_id_0][view_id_1] + if matches_0_to_1.shape[0] == 0: + chunk.logger.info("No matches for "+view_id_0+" "+view_id_1+"\n. ") + continue if (( matches_0_to_1[:,0].shape[0] != np.unique(matches_0_to_1[:,0]).shape[0] ) or ( matches_0_to_1[:,1].shape[0] != np.unique(matches_0_to_1[:,1]).shape[0] ) ): - # raise RuntimeError("Found duplicated points fo images "+view_id_0+" "+view_id_1+"\n. ") - print("Found duplicated points fo images "+view_id_0+" "+view_id_1+"\n. ") - + # chunk.logger.warning("Found duplicated points fo images "+view_id_0+" "+view_id_1+"\n. 
") + print("Found duplicated points for images "+view_id_0+" "+view_id_1) if chunk.node.matchOnly.value: #if match only, will only display line image_file_0 = [view["path"] for view in sfm_data["views"] if view["viewId"]==view_id_0][0] image_file_1 = [view["path"] for view in sfm_data["views"] if view["viewId"]==view_id_1][0] else: image_file_0 = os.path.join(chunk.node.outputFolder.value, "features_"+view_id_0+".png") - image_file_1 = os.path.join(chunk.node.outputFolder.value, "features_"+view_id_1+".png") - - image_0 = open_image(image_file_0) - image_1 = open_image(image_file_1) - + image_file_1 = os.path.join(chunk.node.outputFolder.value, "features_"+view_id_1+".png") + image_0 = open_image(image_file_0)[:,:,0:3] + image_1 = open_image(image_file_1)[:,:,0:3] match_image = np.concatenate([image_0, image_1], axis=1) keypoint_file_0 = [os.path.join(chunk.node.featureFolder.value, ff) for ff in feature_files if ((view_id_0 in ff) and ff.endswith(".feat"))][0] keypoint_file_1 = [os.path.join(chunk.node.featureFolder.value, ff) for ff in feature_files if ((view_id_1 in ff) and ff.endswith(".feat"))][0] keypoints_0 = np.loadtxt(keypoint_file_0) keypoints_1 = np.loadtxt(keypoint_file_1) + + #sort by confidence if any + if matches_0_to_1.shape[-1]>2: + chunk.logger.info("Sorting by confidence") + matches_0_to_1=np.asarray(sorted(matches_0_to_1, key=lambda m:m[2], reverse=True)) + #else random + else: + matches_0_to_1=np.asarray(sorted(matches_0_to_1, key=lambda m:np.random.rand(1), reverse=True)) + o=image_0.shape[1] - for m in matches_0_to_1[0:chunk.node.keepMatches.value]: + for m in matches_0_to_1[0:chunk.node.keepMatches.value].astype(np.int32): if m[0]>keypoints_0.shape[0]: - raise RuntimeError("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_0, m[0],keypoints_0.shape[0])) + chunk.logger.warning("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_0, m[0],keypoints_0.shape[0])) + continue if m[1]>keypoints_1.shape[0]: - raise RuntimeError("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_1, m[1],keypoints_1.shape[0])) + chunk.logger.warning("ERROR FEATURE INDEX IN MATCH OUTSIDE OF LISTED FEATURES FOR %s (%d vs %d)"%(view_id_1, m[1],keypoints_1.shape[0])) + continue kp0 = keypoints_0[m[0]] kp1 = keypoints_1[m[1]] + if chunk.node.matchOnly.value: + color= (np.random.rand(3)*255).astype(np.uint8) match_image=draw_keypoints(match_image, np.asarray( [(int(kp0[0]),int(kp0[1])), - (int(o+kp1[0]),int(kp1[1]))]) ) - cv2.line(match_image, (int(kp0[0]),int(kp0[1])), (int(o+kp1[0]),int(kp1[1])), color = [0,0,255]) + (int(o+kp1[0]),int(kp1[1]))]), cols=color ) + cv2.line(match_image, (int(kp0[0]),int(kp0[1])), (int(o+kp1[0]),int(kp1[1])), color = color.tolist()) save_image(os.path.join(chunk.node.outputFolder.value, "matches_"+view_id_0+"_"+view_id_1+".png"), match_image) - + chunk.logManager.end() diff --git a/mrrs/deep_feature_matching/VizTracks.py b/mrrs/deep_feature_matching/VizTracks.py index b712217..cfd3b9a 100644 --- a/mrrs/deep_feature_matching/VizTracks.py +++ b/mrrs/deep_feature_matching/VizTracks.py @@ -20,7 +20,6 @@ class VizTracks(PluginNode): label='SfMData', description='SfMData file.', value='', - uid=[0], ), desc.File( @@ -28,7 +27,7 @@ class VizTracks(PluginNode): label='inputTracks', description='inputTracks', value='', - uid=[0], + ), desc.File( @@ -36,7 +35,7 @@ class VizTracks(PluginNode): label="Feature Folder", description="Featurefolder", value="", - uid=[0], + ), 
desc.ChoiceParam( @@ -49,7 +48,6 @@ class VizTracks(PluginNode): "akaze_mldb", "cctag3", "cctag4", "sift_ocv", "akaze_ocv", "tag16h5", "unknown"], exclusive=True, - uid=[0] ), desc.ChoiceParam( @@ -59,7 +57,6 @@ class VizTracks(PluginNode): value="info", values=["fatal", "error", "warning", "info", "debug", "trace"], exclusive=True, - uid=[], ) ] @@ -69,7 +66,6 @@ class VizTracks(PluginNode): label='outputFolder', description='outputFolder', value=desc.Node.internalFolder, - uid=[], group='', ), desc.File( @@ -78,7 +74,6 @@ class VizTracks(PluginNode): description='trackViz', semantic='image', value=os.path.join(desc.Node.internalFolder, 'tracks_.png'), - uid=[], group='', ), ] diff --git a/mrrs/deep_feature_matching/env.yaml b/mrrs/deep_feature_matching/env.yaml index a6245b3..44db851 100644 --- a/mrrs/deep_feature_matching/env.yaml +++ b/mrrs/deep_feature_matching/env.yaml @@ -18,7 +18,6 @@ dependencies: - torchvision - opencv - kornia - #for meshroom & mrrs - openimageio - py-openimageio diff --git a/mrrs/deep_feature_matching/kornia_wrappers/__init__.py b/mrrs/deep_feature_matching/kornia_wrappers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mrrs/deep_feature_matching/kornia_wrappers/utils.py b/mrrs/deep_feature_matching/kornia_wrappers/utils.py index 9f7102f..629e924 100644 --- a/mrrs/deep_feature_matching/kornia_wrappers/utils.py +++ b/mrrs/deep_feature_matching/kornia_wrappers/utils.py @@ -43,70 +43,3 @@ def __str__(self): return str(float(self)) def __repr__(self): return str(float(self)) - -#FIXME: all of this should be in core -def open_image_grapĥ(imagepairs, nb_image): - with open(imagepairs, 'r') as matchfile: - matches_raw = matchfile.readlines() - #one line per image - image_pairs = [line.strip().split(" ") for line in matches_raw] - if len(image_pairs) != nb_image: - if len(image_pairs) == nb_image-1:#file is not properly written in AV, if last image no match, no \n - image_pairs.append("") - else: - raise RuntimeError("Malformed image match file, %d vs %d images"%(len(image_pairs), nb_image-1)) - return image_pairs - -def open_descriptor_file(descriptor_file): - with open(descriptor_file, "rb") as df: - #read number of desc from first byte - nb_desv_encoded = struct.unpack('N', df.read(struct.calcsize('N')))[0] - #read rematinign floats - descriptors = np.asarray(list(struct.iter_unpack('f', df.read()))) - descriptors=np.reshape(descriptors, (nb_desv_encoded, -1)) - return descriptors - -def write_descriptor_file(descriptors, desk_filename): - with open(desk_filename, "wb") as df: - #nb of desc, as size_t (should be 1 byte) - nb_desv_encoded = struct.pack('N', int(descriptors.shape[0])) - df.write(nb_desv_encoded) - for descriptor in descriptors:#write descriptor as floats (4 bytes) - for d in descriptor: - d=struct.pack('f', d) - df.write(d) - -def parse_line(matches): - result = [m.strip() for m in matches.readline().split(" ")] - if len(result) == 1: - if result[0] == "": - return None - result = result[0] - return result - -def open_matches(match_file): - with open(match_file, "r") as match_file: - match_data = {} - while True: - view_ids = parse_line(match_file) - if view_ids is None: - break - view_id_0, view_id_1 = view_ids - nb_type_feat = parse_line(match_file) - if nb_type_feat != "1": - raise RuntimeError("Only supports one descriptor type at the time") - type_feat, nb_match = parse_line(match_file) - nb_match = int(nb_match) - matches_raw = [match_file.readline() for _ in range(nb_match)] - #avoid the squeeze when onlly one match - if 
len(matches_raw) == 1: - matches = np.expand_dims(np.loadtxt(matches_raw).astype(np.int32), axis=0) - else: - matches = np.loadtxt(matches_raw).astype(np.int32) - if matches.shape[0] != nb_match: - raise RuntimeError("Unexpected number of matches for view %s %d vs %d"%(view_id_0, matches.shape[0], nb_match)) - #save result - if not (view_id_0 in match_data.keys()): - match_data[view_id_0]={} - match_data[view_id_0][view_id_1] = matches - return match_data \ No newline at end of file
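A note on the XMP export patched above: `_export_xmp` converts a camera from the SfMData matrices to the RealityCapture convention, i.e. a 35mm-equivalent focal length, a principal point expressed as an offset from the image centre relative to the sensor width, and a camera centre computed as -R^T t from the world-to-camera extrinsics. A minimal standalone restatement of that conversion, assuming `extrinsics` is the 3x4 world-to-camera [R|t] produced by matrices_from_sfm_data and that SENSOR_SIZE is the 35mm reference constant used in reality_capture.py:

import numpy as np

SENSOR_SIZE = 35.0  # assumed value of the module constant referenced in reality_capture.py

def xmp_camera(extrinsics, intrinsics, pixel_size, image_size):
    # sensor dimensions in the same physical unit as the intrinsics
    sensor_width = pixel_size * image_size[0]
    sensor_height = pixel_size * image_size[1]

    # focal length rescaled to the 35mm-equivalent sensor
    focal35 = intrinsics[0, 0] * SENSOR_SIZE / sensor_width

    # principal point as a relative offset from the image centre
    # (both components are divided by the sensor width, as in the patch)
    pp_u = (intrinsics[0, 2] - sensor_width / 2.0) / sensor_width
    pp_v = (intrinsics[1, 2] - sensor_height / 2.0) / sensor_width

    # RealityCapture stores the world-to-camera rotation and the camera centre C = -R^T t
    R = extrinsics[0:3, 0:3]
    t = extrinsics[0:3, 3]
    C = -R.T @ t
    return focal35, pp_u, pp_v, R, C

Distortion parameters are left at zero, matching the FIXME in the XMP template string.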
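In CreateTracksFromWarps, every observation is identified by a single integer that packs (view index, x, y) at the 864x864 working resolution, so that dense warps can be turned into graph edges. A round-trip sketch of that packing, with the working resolution as the only assumption:

WORKING_REZ = (864, 864)  # square working resolution used by the node

def to_1d(view, x, y, rez=WORKING_REZ):
    # view-major, then row, then column
    return view * rez[0] * rez[1] + y * rez[1] + x

def from_1d(index, rez=WORKING_REZ):
    view = index // (rez[0] * rez[1])
    y = (index // rez[1]) % rez[0]  # the node uses rez[1] in the modulo too, valid only because rez is square
    x = index % rez[1]
    return view, x, y

# round trip on a few arbitrary observations
for v, x, y in [(0, 0, 0), (3, 5, 17), (11, 863, 863)]:
    assert from_1d(to_1d(v, x, y)) == (v, x, y)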
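GeometricFilterMatch (fundamental mode) fits a fundamental matrix to the sampled matches with cv2.findFundamentalMat and keeps the raw matches whose symmetric point-to-epipolar-line distance stays below errorThreshold. The distance can be restated compactly; make_homogeneous is re-implemented here instead of being imported from mrrs.core.geometry:

import numpy as np

def make_homogeneous(points):
    # Nx2 -> Nx3 with a trailing 1
    return np.concatenate([points, np.ones((points.shape[0], 1))], axis=1)

def symmetric_epipolar_error(F, pts0, pts1):
    """Mean distance of each correspondence to its two epipolar lines (x1^T F x0 = 0 convention)."""
    l1 = make_homogeneous(pts0) @ F.T   # epipolar lines in image 1, one row per point of image 0
    l0 = make_homogeneous(pts1) @ F     # epipolar lines in image 0
    def point_line_distance(p, l):
        return np.abs(np.sum(l * make_homogeneous(p), axis=1)) / np.linalg.norm(l[:, 0:2], axis=1)
    return 0.5 * (point_line_distance(pts0, l0) + point_line_distance(pts1, l1))

# typical use, mirroring the node:
#   F, _ = cv2.findFundamentalMat(sampled_pts0, sampled_pts1, cv2.RANSAC)
#   keep = symmetric_epipolar_error(F, raw_pts0, raw_pts1) < error_threshold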
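All of these nodes exchange matches through the same plain-text layout ("0.matches.txt"), visible both in the writers above and in the removed open_matches parser: per image pair, a line with the two view ids, a line with the number of descriptor types (always 1 here), a line with the descriptor type and the match count, then one feature-index pair per line, optionally followed by a confidence. A self-contained writer/reader pair for that layout; the function names are illustrative, not part of the code base:

def write_matches(path, matches, desc_type="sift"):
    """matches: {(uid0, uid1): [(feat_idx0, feat_idx1), ...]}"""
    with open(path, "w") as f:
        for (uid0, uid1), pairs in matches.items():
            if not pairs:
                continue
            f.write("%s %s\n" % (uid0, uid1))
            f.write("1\n")                                 # number of descriptor types
            f.write("%s %d\n" % (desc_type, len(pairs)))   # descriptor type and match count
            for idx0, idx1 in pairs:
                f.write("%d %d\n" % (idx0, idx1))

def read_matches(path):
    matches = {}
    with open(path) as f:
        lines = [line.strip() for line in f if line.strip()]
    i = 0
    while i < len(lines):
        uid0, uid1 = lines[i].split()
        nb_desc_types = int(lines[i + 1])
        assert nb_desc_types == 1, "only one descriptor type per pair is handled"
        _desc_type, nb = lines[i + 2].split()
        nb = int(nb)
        # keep the two feature indices, ignore an optional trailing confidence column
        matches[(uid0, uid1)] = [tuple(map(int, lines[i + 3 + k].split()[:2])) for k in range(nb)]
        i += 3 + nb
    return matches

The nodes append one such block per processed pair, which is why they open the file in "a" mode.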
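The version of TrackCreation.py added above stops before writing its output (the track_data loops are empty and `data` is undefined), and flat_matches_to_keys appears to use m[2] for both views. A possible completion of the grouping step, assuming a flattened match row is (viewId0, viewId1, featureIdx0, featureIdx1, confidence) as in MatchPruning, and using (view, feature) tuples as union-find keys instead of the patch's string keys:

import numpy as np
from networkx.utils import UnionFind

def build_tracks(matches_flatten, min_track_size=2):
    """Group pairwise matches into tracks of (view_id, feature_idx) observations."""
    union_find = UnionFind()
    for view0, view1, feat0, feat1, _conf in matches_flatten:
        # merge the two observations; ordering does not matter for a plain union-find
        union_find.union((int(view0), int(feat0)), (int(view1), int(feat1)))
    tracks = []
    for component in union_find.to_sets():
        views = {view for view, _ in component}
        # keep multi-view tracks that observe each view at most once
        if len(component) >= min_track_size and len(views) == len(component):
            tracks.append(sorted(component))
    return tracks

# two matches chaining view 0 -> 1 -> 2 through the same scene point
rows = np.array([[0, 1, 10, 4, 0.9],
                 [1, 2,  4, 7, 0.8]])
print(build_tracks(rows))  # [[(0, 10), (1, 4), (2, 7)]]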