From 52a0e47d01daaa8bbb00ce8a61fb77831b1c8e09 Mon Sep 17 00:00:00 2001
From: Rene
Date: Fri, 9 Aug 2024 14:35:23 +0200
Subject: [PATCH 1/2] Minor bug fix

---
 src/BasisConvolution/convNetv2.py            | 31 +++++++++++++++-----
 src/BasisConvolution/util/hyperparameters.py |  4 +--
 2 files changed, 26 insertions(+), 9 deletions(-)

diff --git a/src/BasisConvolution/convNetv2.py b/src/BasisConvolution/convNetv2.py
index 2bbee3d..37386a1 100644
--- a/src/BasisConvolution/convNetv2.py
+++ b/src/BasisConvolution/convNetv2.py
@@ -395,7 +395,7 @@ def forward(self, \
         # print(f'(post encoder) fluidFeatures: {fluidFeatures.shape}')
             fluidFeatures = fluidFeatures.view(-1, *fluidFeatures.shape[2:])
             if verbose:
-                print(f'(post encoder) fluidFeatures: {fluidFeatures.shape}')
+                print(f'(post encoder) fluidFeatures: {fluidFeatures.shape} [min: {torch.min(fluidFeatures)}, max: {torch.max(fluidFeatures)}, mean: {torch.mean(fluidFeatures)}]')
         # print(f'(post encoder) fluidFeatures: {fluidFeatures.shape}')
 
         self.ni = ni
@@ -418,6 +418,8 @@ def forward(self, \
         if verbose:
             print(f'Running Convolution (FTF) {self.convs[0].inputFeatures} -> {self.convs[0].outputFeatures} features')
         fluidConvolution = (self.convs[0]((fluidFeatures, fluidFeatures), fluidEdgeIndex, fluidEdgeLengths, fluidEdgeWeights))
+        if verbose:
+            print(f'Result: [min: {torch.min(fluidConvolution)}, max: {torch.max(fluidConvolution)}, mean: {torch.mean(fluidConvolution)}]')
         # fluidConvolution = scatter_sum(baseArea * fluidFeatures[fluidEdgeIndex[1]] * kernelGradient(torch.abs(fluidEdgeLengths), torch.sign(fluidEdgeLengths), particleSupport), fluidEdgeIndex[0], dim = 0, dim_size = fluidFeatures.shape[0])
 
         if len(self.layers) == 1:
@@ -426,25 +428,35 @@ def forward(self, \
                     print(f'Running Convolution (BTF) {self.convs[1].inputFeatures} -> {self.convs[1].outputFeatures} features')
                 boundaryConvolution = (self.convs[1]((fluidFeatures, boundaryFeatures), boundaryEdgeIndex, boundaryEdgeLengths, boundaryEdgeWeights))
                 fluidConvolution += boundaryConvolution
+                if verbose:
+                    print(f'Result: [min: {torch.min(boundaryConvolution)}, max: {torch.max(boundaryConvolution)}, mean: {torch.mean(boundaryConvolution)}]')
             if self.layerMLP:
+                if verbose:
+                    print(f'Running Linear {self.fcs[0].in_features} -> {self.fcs[0].out_features} features')
                 fluidConvolution = self.mlps[0](fluidConvolution)
+                if verbose:
+                    print(f'Result: [min: {torch.min(fluidConvolution)}, max: {torch.max(fluidConvolution)}, mean: {torch.mean(fluidConvolution)}]')
             if self.outputDecoder is not None:
                 if verbose:
                     print(f'(pre outputDecoder) fluidConvolution: {fluidConvolution.shape}')
                 fluidConvolution = self.outputDecoder(fluidConvolution.view(batches,-1, *fluidConvolution.shape[1:]))
                 fluidConvolution = fluidConvolution.view(-1, *fluidConvolution.shape[2:])
                 if verbose:
-                    print(f'(post outputDecoder) fluidConvolution: {fluidConvolution.shape}')
+                    print(f'(post outputDecoder) fluidConvolution: {fluidConvolution.shape} [min: {torch.min(fluidConvolution)}, max: {torch.max(fluidConvolution)}, mean: {torch.mean(fluidConvolution)}]')
             return fluidConvolution
 
         if verbose:
             print(f'Running Linear {self.fcs[0].in_features} -> {self.fcs[0].out_features} features')
         linearOutput = (self.fcs[0](fluidFeatures))
+        if verbose:
+            print(f'Result [min: {torch.min(linearOutput)}, max: {torch.max(linearOutput)}, mean: {torch.mean(linearOutput)}]')
 
         if self.hasBoundaryLayers:
             if verbose:
                 print(f'Running Convolution {self.convs[1].inputFeatures} -> {self.convs[1].outputFeatures} features')
             boundaryConvolution = (self.convs[1]((fluidFeatures, boundaryFeatures), boundaryEdgeIndex, boundaryEdgeLengths, boundaryEdgeWeights))
+            if verbose:
+                print(f'Result [min: {torch.min(boundaryConvolution)}, max: {torch.max(boundaryConvolution)}, mean: {torch.mean(boundaryConvolution)}]')
             ans = torch.hstack((linearOutput, fluidConvolution, boundaryConvolution))
         else:
             ans = torch.hstack((linearOutput, fluidConvolution))
@@ -470,11 +482,15 @@ def forward(self, \
                 print(f'Relu {ans.shape}')
             ansc = self.relu(ans)
             if verbose:
-                print(f'Running Convolution {self.convs[i].inputFeatures} -> {self.convs[i].outputFeatures} features')
+                print(f'Layer[{i}]:\tResult for layer {i-1} [min: {torch.min(ansc)}, max: {torch.max(ansc)}, mean: {torch.mean(ansc)}] | [min: {torch.min(ans)}, max: {torch.max(ans)}, mean: {torch.mean(ans)}]')
+                print(f'Layer[{i}]:\tRunning Convolution {self.convs[i].inputFeatures} -> {self.convs[i].outputFeatures} features')
             ansConv = self.convs[i]((ansc, ansc), fluidEdgeIndex, fluidEdgeLengths, fluidEdgeWeights)
             if verbose:
-                print(f'Running Linear {self.fcs[i - (1 if self.hasBoundaryLayers else 0)].in_features} -> {self.fcs[i - (1 if self.hasBoundaryLayers else 0)].out_features} features')
+                print(f'Layer[{i}]:\t\tResult [min: {torch.min(ansConv)}, max: {torch.max(ansConv)}, mean: {torch.mean(ansConv)}]')
+                print(f'Layer[{i}]:\tRunning Linear {self.fcs[i - (1 if self.hasBoundaryLayers else 0)].in_features} -> {self.fcs[i - (1 if self.hasBoundaryLayers else 0)].out_features} features')
             ansDense = self.fcs[i - (1 if self.hasBoundaryLayers else 0)](ansc)
+            if verbose:
+                print(f'Layer[{i}]:\t\tResult [min: {torch.min(ansDense)}, max: {torch.max(ansDense)}, mean: {torch.mean(ansDense)}]')
 
 
             if self.features[i- (2 if self.hasBoundaryLayers else 1)] == self.features[i-(1 if self.hasBoundaryLayers else 0)] and ans.shape == ansConv.shape:
@@ -485,22 +501,23 @@ def forward(self, \
                 ans = self.mlps[i](ans)
             if self.edgeMLP is not None and i < layers - 1:
                 if verbose:
-                    print(f'Running Edge MLP {self.edgeMLP["inputFeatures"]} -> {self.edgeMLP["output"]} features')
+                    print(f'Layer[{i}]:\tRunning Edge MLP {self.edgeMLP["inputFeatures"]} -> {self.edgeMLP["output"]} features')
                 fluidEdgeLengths = self.edgeMLPs[i](fluidEdgeLengths)
                 fluidEdgeLengths = fluidEdgeLengths.clamp(-1,1)
 
             if self.vertexMLP is not None and i < layers - 1:
                 if verbose:
-                    print(f'Running Vertex MLP {self.vertexMLPDicts[i]["inputFeatures"]} -> {self.vertexMLPDicts[i]["output"]} features')
+                    print(f'Layer[{i}]:\tRunning Vertex MLP {self.vertexMLPDicts[i]["inputFeatures"]} -> {self.vertexMLPDicts[i]["output"]} features')
                 transposedFeatures = ans.view(batches,-1, *ans.shape[1:])
                 ans = self.vertexMLPs[i](transposedFeatures)
                 ans = ans.view(-1, *ans.shape[2:])
+
         if self.outputDecoder is not None:
             if verbose:
                 print(f'(pre outputDecoder) ans: {ans.shape}')
             ans = self.outputDecoder(ans.view(batches,-1, *ans.shape[1:]))
             ans = ans.view(-1, *ans.shape[2:])
             if verbose:
-                print(f'(post outputDecoder) ans: {ans.shape}')
+                print(f'(post outputDecoder) ans: {ans.shape} [min: {torch.min(ans)}, max: {torch.max(ans)}, mean: {torch.mean(ans)}]')
 
         return ans * self.outputScaling #(ans * outputScaling) if self.dim == 2 else ans
\ No newline at end of file
diff --git a/src/BasisConvolution/util/hyperparameters.py b/src/BasisConvolution/util/hyperparameters.py
index 5fa7417..7f8b527 100644
--- a/src/BasisConvolution/util/hyperparameters.py
+++ b/src/BasisConvolution/util/hyperparameters.py
@@ -585,9 +585,9 @@ def finalizeHyperParameters(hyperParameterDict, dataset):
     hyperParameterDict['layers'] = [int(s) for s in hyperParameterDict['widths']]
 
-    hyperParameterDict['shortLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:14s}] -> [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}, {hyperParameterDict["fluidFeatures"]} -> {hyperParameterDict["groundTruth"]}'
+    hyperParameterDict['shortLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:14s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}, {hyperParameterDict["fluidFeatures"]} - {hyperParameterDict["groundTruth"]}'
 
-    hyperParameterDict['progressLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:4s}] -> [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}'
+    hyperParameterDict['progressLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:4s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}'
 
     hyperParameterDict['exportLabel'] = f'{hyperParameterDict["timestamp"]} - {hyperParameterDict["networkSeed"]} - {hyperParameterDict["shortLabel"]}'.replace(":", ".").replace("/", "_")
 
 

From bcf34f52db1227c1164b6535168cbc2fc703de9f Mon Sep 17 00:00:00 2001
From: Rene
Date: Fri, 9 Aug 2024 15:02:05 +0200
Subject: [PATCH 2/2] Updated label info

---
 src/BasisConvolution/util/hyperparameters.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/BasisConvolution/util/hyperparameters.py b/src/BasisConvolution/util/hyperparameters.py
index 7f8b527..913c55f 100644
--- a/src/BasisConvolution/util/hyperparameters.py
+++ b/src/BasisConvolution/util/hyperparameters.py
@@ -584,10 +584,11 @@ def finalizeHyperParameters(hyperParameterDict, dataset):
     hyperParameterDict['widths'] = hyperParameterDict['arch'].strip().split(' ')
     hyperParameterDict['layers'] = [int(s) for s in hyperParameterDict['widths']]
+    hyperParameterDict['mlpLabel'] = f'[{"V" if hyperParameterDict["vertexMLP"] is not None else " "}{"E" if hyperParameterDict["edgeMLP"] is not None else " "}{"I" if hyperParameterDict["inputEncoder"] is not None else " "}{"O" if hyperParameterDict["outputDecoder"] is not None else " "}]'
 
-    hyperParameterDict['shortLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:14s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}, {hyperParameterDict["fluidFeatures"]} - {hyperParameterDict["groundTruth"]}'
+    hyperParameterDict['shortLabel'] = f'{hyperParameterDict["networkType"]:8s}{"+loss" if hyperParameterDict["shiftLoss"] else ""} [{hyperParameterDict["arch"]:14s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}, {hyperParameterDict["fluidFeatures"]} - {hyperParameterDict["groundTruth"]} {hyperParameterDict["mlpLabel"]}'
 
-    hyperParameterDict['progressLabel'] = f'{hyperParameterDict["networkType"]:8s} [{hyperParameterDict["arch"]:4s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s}'
+    hyperParameterDict['progressLabel'] = f'{hyperParameterDict["networkType"]:8s}{"+loss" if hyperParameterDict["shiftLoss"] else ""} [{hyperParameterDict["arch"]:4s}] - [{hyperParameterDict["basisFunctions"]:8s}] x [{hyperParameterDict["basisTerms"]:2d}] @ {hyperParameterDict["coordinateMapping"]:4s}/{hyperParameterDict["windowFunction"] if hyperParameterDict["windowFunction"] is not None else "None":4s} {hyperParameterDict["mlpLabel"]}'
 
     hyperParameterDict['exportLabel'] = f'{hyperParameterDict["timestamp"]} - {hyperParameterDict["networkSeed"]} - {hyperParameterDict["shortLabel"]}'.replace(":", ".").replace("/", "_")