diff --git a/DataFormats/HGCalReco/interface/Common.h b/DataFormats/HGCalReco/interface/Common.h
index cbe7caf87117a..af1ad19f9ccb7 100644
--- a/DataFormats/HGCalReco/interface/Common.h
+++ b/DataFormats/HGCalReco/interface/Common.h
@@ -14,7 +14,20 @@ namespace ticl {
     static constexpr int nLayers = 104;
     static constexpr int iterations = 4;
     static constexpr int nBins = nEtaBins * nPhiBins;
+    static constexpr int type = 0;
   };
+
+  struct TileConstantsHFNose {
+    static constexpr float minEta = 3.0f;
+    static constexpr float maxEta = 4.2f;
+    static constexpr int nEtaBins = 24;
+    static constexpr int nPhiBins = 126;
+    static constexpr int nLayers = 16;  // 8x2
+    static constexpr int iterations = 4;
+    static constexpr int nBins = nEtaBins * nPhiBins;
+    static constexpr int type = 1;
+  };
+
 }  // namespace ticl
 
 namespace ticl {
diff --git a/DataFormats/HGCalReco/interface/TICLLayerTile.h b/DataFormats/HGCalReco/interface/TICLLayerTile.h
index 01f0b2e3ca7b0..5d9cc8fb51a1c 100644
--- a/DataFormats/HGCalReco/interface/TICLLayerTile.h
+++ b/DataFormats/HGCalReco/interface/TICLLayerTile.h
@@ -14,6 +14,8 @@ class TICLLayerTileT {
     tile_[globalBin(eta, phi)].push_back(layerClusterId);
   }
 
+  int typeT() const { return T::type; }
+
   int etaBin(float eta) const {
     constexpr float etaRange = T::maxEta - T::minEta;
     static_assert(etaRange >= 0.f);
@@ -31,6 +33,14 @@ class TICLLayerTileT {
     return phiBin;
   }
 
+  std::array<int, 4> searchBoxEtaPhi(float etaMin, float etaMax, float phiMin, float phiMax) const {
+    int etaBinMin = etaBin(etaMin);
+    int etaBinMax = etaBin(etaMax);
+    int phiBinMin = phiBin(phiMin);
+    int phiBinMax = phiBin(phiMax);
+    return std::array<int, 4>({{etaBinMin, etaBinMax, phiBinMin, phiBinMax}});
+  }
+
   int globalBin(int etaBin, int phiBin) const { return phiBin + etaBin * T::nPhiBins; }
 
   int globalBin(double eta, double phi) const { return phiBin(phi) + etaBin(eta) * T::nPhiBins; }
@@ -51,6 +61,11 @@ namespace ticl {
   using TICLLayerTile = TICLLayerTileT<TileConstants>;
   using Tiles = std::array<TICLLayerTile, TileConstants::nLayers>;
   using TracksterTiles = std::array<TICLLayerTile, TileConstants::iterations>;
+
+  using TICLLayerTileHFNose = TICLLayerTileT<TileConstantsHFNose>;
+  using TilesHFNose = std::array<TICLLayerTileHFNose, TileConstantsHFNose::nLayers>;
+  using TracksterTilesHFNose = std::array<TICLLayerTileHFNose, TileConstantsHFNose::iterations>;
+
 }  // namespace ticl
 
 template <typename T>
@@ -68,5 +83,7 @@ class TICLGenericTile {
 
 using TICLLayerTiles = TICLGenericTile<ticl::Tiles>;
 using TICLTracksterTiles = TICLGenericTile<ticl::TracksterTiles>;
+using TICLLayerTilesHFNose = TICLGenericTile<ticl::TilesHFNose>;
+using TICLTracksterTilesHFNose = TICLGenericTile<ticl::TracksterTilesHFNose>;
 
 #endif
diff --git a/DataFormats/HGCalReco/src/classes_def.xml b/DataFormats/HGCalReco/src/classes_def.xml
index 1d88912bd9af1..4eb960800b479 100644
--- a/DataFormats/HGCalReco/src/classes_def.xml
+++ b/DataFormats/HGCalReco/src/classes_def.xml
@@ -25,6 +25,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/RecoEgamma/EgammaTools/interface/HGCalEgammaIDHelper.h b/RecoEgamma/EgammaTools/interface/HGCalEgammaIDHelper.h
index 2d6eadb4b9a39..9e7360c6c9f55 100644
--- a/RecoEgamma/EgammaTools/interface/HGCalEgammaIDHelper.h
+++ b/RecoEgamma/EgammaTools/interface/HGCalEgammaIDHelper.h
@@ -14,6 +14,7 @@
 #include "FWCore/Framework/interface/ESHandle.h"
 #include "FWCore/Framework/interface/Event.h"
 #include "FWCore/Utilities/interface/InputTag.h"
+#include "FWCore/Utilities/interface/ESGetToken.h"
 #include "FWCore/Framework/interface/ConsumesCollector.h"
 #include "FWCore/ParameterSet/interface/ParameterSet.h"
 
@@ -78,6 +79,7 @@ class HGCalEgammaIDHelper {
   edm::EDGetTokenT<HGCRecHitCollection> recHitsEE_;
   edm::EDGetTokenT<HGCRecHitCollection> recHitsFH_;
   edm::EDGetTokenT<HGCRecHitCollection> recHitsBH_;
+  edm::ESGetToken<CaloGeometry, CaloGeometryRecord> caloGeometry_;
   hgcal::RecHitTools recHitTools_;
   bool debug_;
 };
diff --git a/RecoEgamma/EgammaTools/src/HGCalEgammaIDHelper.cc b/RecoEgamma/EgammaTools/src/HGCalEgammaIDHelper.cc
index 5331e24782291..0dfbadabfc00d 100644
--- a/RecoEgamma/EgammaTools/src/HGCalEgammaIDHelper.cc
+++ b/RecoEgamma/EgammaTools/src/HGCalEgammaIDHelper.cc
@@ -14,6 +14,7 @@ HGCalEgammaIDHelper::HGCalEgammaIDHelper(const edm::ParameterSet& iConfig, edm::
   recHitsEE_ = iC.consumes<HGCRecHitCollection>(eeRecHitInputTag_);
   recHitsFH_ = iC.consumes<HGCRecHitCollection>(fhRecHitInputTag_);
   recHitsBH_ = iC.consumes<HGCRecHitCollection>(bhRecHitInputTag_);
+  caloGeometry_ = iC.esConsumes();
   pcaHelper_.setdEdXWeights(dEdXWeights_);
   debug_ = iConfig.getUntrackedParameter<bool>("debug", false);
 }
@@ -26,7 +27,8 @@ void HGCalEgammaIDHelper::eventInit(const edm::Event& iEvent, const edm::EventSe
   edm::Handle<HGCRecHitCollection> recHitHandleBH;
   iEvent.getByToken(recHitsBH_, recHitHandleBH);
 
-  recHitTools_.getEventSetup(iSetup);
+  edm::ESHandle<CaloGeometry> geom = iSetup.getHandle(caloGeometry_);
+  recHitTools_.setGeometry(*geom);
   pcaHelper_.setRecHitTools(&recHitTools_);
   isoHelper_.setRecHitTools(&recHitTools_);
   pcaHelper_.fillHitMap(*recHitHandleEE, *recHitHandleFH, *recHitHandleBH);
diff --git a/RecoHGCal/Configuration/python/RecoHGCal_EventContent_cff.py b/RecoHGCal/Configuration/python/RecoHGCal_EventContent_cff.py
index 18c568e4a74b4..77b647e6ce291 100644
--- a/RecoHGCal/Configuration/python/RecoHGCal_EventContent_cff.py
+++ b/RecoHGCal/Configuration/python/RecoHGCal_EventContent_cff.py
@@ -7,6 +7,7 @@
       'keep *_ticlMultiClustersFromTrackstersEM_*_*',
       'keep *_ticlMultiClustersFromTrackstersHAD_*_*',
       'keep *_ticlMultiClustersFromTrackstersTrk_*_*',
+      'keep *_ticlMultiClustersFromTrackstersTrkEM_*_*',
       'keep *_ticlMultiClustersFromTrackstersMIP_*_*',
       'keep *_ticlMultiClustersFromTrackstersMerge_*_*',
     )
@@ -15,12 +16,15 @@
 #RECO content
 TICL_RECO = cms.PSet(
     outputCommands = cms.untracked.vstring(
+      'keep *_ticlTrackstersTrkEM_*_*',
       'keep *_ticlTrackstersEM_*_*',
       'keep *_ticlTrackstersHAD_*_*',
       'keep *_ticlTrackstersTrk_*_*',
       'keep *_ticlTrackstersMIP_*_*',
       'keep *_ticlTrackstersMerge_*_*',
-      'keep *_ticlCandidateFromTracksters_*_*',
+      'keep *_ticlTrackstersHFNoseEM_*_*',
+      'keep *_ticlTrackstersHFNoseMIP_*_*',
+      'keep *_ticlTrackstersHFNoseMerge_*_*',
       'keep *_pfTICL_*_*'
     )
 )
diff --git a/RecoHGCal/TICL/plugins/ClusterFilterByAlgoAndSizeAndLayerRange.h b/RecoHGCal/TICL/plugins/ClusterFilterByAlgoAndSizeAndLayerRange.h
new file mode 100644
index 0000000000000..5f1d9388c3fe4
--- /dev/null
+++ b/RecoHGCal/TICL/plugins/ClusterFilterByAlgoAndSizeAndLayerRange.h
@@ -0,0 +1,55 @@
+// Authors: Marco Rovere - marco.rovere@cern.ch, Felice Pantaleo - felice.pantaleo@cern.ch
+// Date: 09/2020
+
+#ifndef RecoHGCal_TICL_ClusterFilterByAlgoAndSizeAndLayerRange_H__
+#define RecoHGCal_TICL_ClusterFilterByAlgoAndSizeAndLayerRange_H__
+
+#include "DataFormats/CaloRecHit/interface/CaloCluster.h"
+#include "ClusterFilterBase.h"
+
+#include <memory>
+#include <utility>
+
+// Filter clusters that belong to a specific algorithm
+namespace ticl {
+  class ClusterFilterByAlgoAndSizeAndLayerRange final : public ClusterFilterBase {
+  public:
+    ClusterFilterByAlgoAndSizeAndLayerRange(const edm::ParameterSet& ps)
+        : ClusterFilterBase(ps),
+          algo_number_(ps.getParameter<int>("algo_number")),
+          min_cluster_size_(ps.getParameter<int>("min_cluster_size")),
+          max_cluster_size_(ps.getParameter<int>("max_cluster_size")),
+          min_layerId_(ps.getParameter<int>("min_layerId")),
+          max_layerId_(ps.getParameter<int>("max_layerId")) {}
+    ~ClusterFilterByAlgoAndSizeAndLayerRange() override{};
+
+    void filter(const std::vector<reco::CaloCluster>& layerClusters,
+                const HgcalClusterFilterMask& availableLayerClusters,
+                std::vector<float>& layerClustersMask,
+                hgcal::RecHitTools& rhtools) const override {
+      auto filteredLayerClusters = std::make_unique<HgcalClusterFilterMask>();
+      for (auto const& cl : availableLayerClusters) {
+        auto const& layerCluster = layerClusters[cl.first];
+        auto const& haf = layerCluster.hitsAndFractions();
+        auto layerId = rhtools.getLayerWithOffset(haf[0].first);
+
+        if (layerCluster.algo() == algo_number_ and layerId <= max_layerId_ and layerId >= min_layerId_ and
+            haf.size() <= max_cluster_size_ and
+            (haf.size() >= min_cluster_size_ or !(rhtools.isSilicon(haf[0].first)))) {
+          filteredLayerClusters->emplace_back(cl);
+        } else {
+          layerClustersMask[cl.first] = 0.;
+        }
+      }
+    }
+
+  private:
+    int algo_number_;
+    unsigned int min_cluster_size_;
+    unsigned int max_cluster_size_;
+    unsigned int min_layerId_;
+    unsigned int max_layerId_;
+  };
+}  // namespace ticl
+
+#endif
diff --git a/RecoHGCal/TICL/plugins/FilteredLayerClustersProducer.cc b/RecoHGCal/TICL/plugins/FilteredLayerClustersProducer.cc
index e0dfc901b008e..e3f0204cf47ba 100644
--- a/RecoHGCal/TICL/plugins/FilteredLayerClustersProducer.cc
+++ b/RecoHGCal/TICL/plugins/FilteredLayerClustersProducer.cc
@@ -10,6 +10,7 @@
 #include "FWCore/MessageLogger/interface/MessageLogger.h"
 #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h"
 #include "FWCore/ParameterSet/interface/ParameterSetDescription.h"
+#include "FWCore/Utilities/interface/ESGetToken.h"
 
 #include "DataFormats/ParticleFlowReco/interface/PFCluster.h"
 #include "RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h"
@@ -30,6 +31,7 @@ class FilteredLayerClustersProducer : public edm::stream::EDProducer<> {
 private:
   edm::EDGetTokenT<std::vector<reco::CaloCluster>> clusters_token_;
   edm::EDGetTokenT<std::vector<float>> clustersMask_token_;
+  edm::ESGetToken<CaloGeometry, CaloGeometryRecord> caloGeometry_token_;
   std::string clusterFilter_;
   std::string iteration_label_;
   std::unique_ptr<const ticl::ClusterFilterBase> theFilter_;
@@ -39,26 +41,32 @@ DEFINE_FWK_MODULE(FilteredLayerClustersProducer);
 
 FilteredLayerClustersProducer::FilteredLayerClustersProducer(const edm::ParameterSet& ps) {
-  clusters_token_ = consumes<std::vector<reco::CaloCluster>>(ps.getParameter<edm::InputTag>("HGCLayerClusters"));
+  clusters_token_ = consumes<std::vector<reco::CaloCluster>>(ps.getParameter<edm::InputTag>("LayerClusters"));
   clustersMask_token_ = consumes<std::vector<float>>(ps.getParameter<edm::InputTag>("LayerClustersInputMask"));
+  caloGeometry_token_ = esConsumes();
   clusterFilter_ = ps.getParameter<std::string>("clusterFilter");
   theFilter_ = ClusterFilterFactory::get()->create(clusterFilter_, ps);
   iteration_label_ = ps.getParameter<std::string>("iteration_label");
   produces<std::vector<float>>(iteration_label_);
 }
 
-void FilteredLayerClustersProducer::beginRun(edm::Run const&, edm::EventSetup const& es) { rhtools_.getEventSetup(es); }
+void FilteredLayerClustersProducer::beginRun(edm::Run const&, edm::EventSetup const& es) {
+  edm::ESHandle<CaloGeometry> geom = es.getHandle(caloGeometry_token_);
+  rhtools_.setGeometry(*geom);
+}
 
 void FilteredLayerClustersProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
   // hgcalMultiClusters
   edm::ParameterSetDescription desc;
-  desc.add<edm::InputTag>("HGCLayerClusters", edm::InputTag("hgcalLayerClusters"));
+  desc.add<edm::InputTag>("LayerClusters", edm::InputTag("hgcalLayerClusters"));
   desc.add<edm::InputTag>("LayerClustersInputMask", edm::InputTag("hgcalLayerClusters", "InitialLayerClustersMask"));
   desc.add<std::string>("iteration_label", "iterationLabelGoesHere");
   desc.add<std::string>("clusterFilter", "ClusterFilterByAlgoAndSize");
   desc.add<int>("algo_number", 9);
   desc.add<int>("min_cluster_size", 0);
   desc.add<int>("max_cluster_size", 9999);
+  desc.add<int>("min_layerId", 0);
+  desc.add<int>("max_layerId", 9999);
descriptions.add("filteredLayerClustersProducer", desc); } diff --git a/RecoHGCal/TICL/plugins/HGCDoublet.cc b/RecoHGCal/TICL/plugins/HGCDoublet.cc index 2cddcb6684288..cac2c4d1a52df 100644 --- a/RecoHGCal/TICL/plugins/HGCDoublet.cc +++ b/RecoHGCal/TICL/plugins/HGCDoublet.cc @@ -81,14 +81,17 @@ int HGCDoublet::areAligned(double xi, // inner product auto dot = dx1 * dx2 + dy1 * dy2 + dz1 * dz2; + auto dotsq = dot * dot; // magnitudes - auto mag1 = std::sqrt(dx1 * dx1 + dy1 * dy1 + dz1 * dz1); - auto mag2 = std::sqrt(dx2 * dx2 + dy2 * dy2 + dz2 * dz2); - // angle between the vectors - auto cosTheta = dot / (mag1 * mag2); + auto mag1sq = dx1 * dx1 + dy1 * dy1 + dz1 * dz1; + auto mag2sq = dx2 * dx2 + dy2 * dy2 + dz2 * dz2; + + auto minCosTheta_sq = minCosTheta * minCosTheta; + bool isWithinLimits = (dotsq > minCosTheta_sq * mag1sq * mag2sq); + if (debug) { - LogDebug("HGCDoublet") << "-- Are Aligned -- dot: " << dot << " mag1: " << mag1 << " mag2: " << mag2 - << " cosTheta: " << cosTheta << " isWithinLimits: " << (cosTheta > minCosTheta) << std::endl; + LogDebug("HGCDoublet") << "-- Are Aligned -- dotsq: " << dotsq << " mag1sq: " << mag1sq << " mag2sq: " << mag2sq + << "minCosTheta_sq:" << minCosTheta_sq << " isWithinLimits: " << isWithinLimits << std::endl; } // Now check the compatibility with the pointing origin. @@ -102,15 +105,18 @@ int HGCDoublet::areAligned(double xi, const GlobalVector pointingDir = (seedIndex_ == -1) ? GlobalVector(innerX(), innerY(), innerZ()) : refDir; auto dot_pointing = pointingDir.dot(firstDoublet); - auto mag_pointing = sqrt(pointingDir.mag2()); - auto cosTheta_pointing = dot_pointing / (mag2 * mag_pointing); + auto dot_pointing_sq = dot_pointing * dot_pointing; + auto mag_pointing_sq = pointingDir.mag2(); + auto minCosPointing_sq = minCosPointing * minCosPointing; + bool isWithinLimitsPointing = (dot_pointing_sq > minCosPointing_sq * mag_pointing_sq * mag2sq); if (debug) { - LogDebug("HGCDoublet") << "-- Are Aligned -- dot_pointing: " << dot_pointing << " mag_pointing: " << mag_pointing - << " mag2: " << mag2 << " cosTheta_pointing: " << cosTheta_pointing - << " isWithinLimits: " << (cosTheta_pointing > minCosPointing) << std::endl; + LogDebug("HGCDoublet") << "-- Are Aligned -- dot_pointing_sq: " << dot_pointing * dot_pointing + << " mag_pointing_sq: " << mag_pointing_sq << " mag2sq: " << mag2sq + << " isWithinLimits: " << isWithinLimitsPointing << std::endl; } - - return (cosTheta > minCosTheta) && (cosTheta_pointing > minCosPointing); + // by squaring cosTheta and multiplying by the squares of the magnitudes + // an equivalent comparison is made without the division and square root which are costly FP ops. 
+ return isWithinLimits && isWithinLimitsPointing; } void HGCDoublet::findNtuplets(std::vector &allDoublets, diff --git a/RecoHGCal/TICL/plugins/HGCGraph.cc b/RecoHGCal/TICL/plugins/HGCGraph.cc index 707a425b06e0f..e94b87426bc95 100644 --- a/RecoHGCal/TICL/plugins/HGCGraph.cc +++ b/RecoHGCal/TICL/plugins/HGCGraph.cc @@ -7,25 +7,30 @@ #include "HGCGraph.h" #include "DataFormats/Common/interface/ValueMap.h" -void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, - const std::vector ®ions, - int nEtaBins, - int nPhiBins, - const std::vector &layerClusters, - const std::vector &mask, - const edm::ValueMap> &layerClustersTime, - int deltaIEta, - int deltaIPhi, - float minCosTheta, - float minCosPointing, - float etaLimitIncreaseWindow, - int missing_layers, - int maxNumberOfLayers, - float maxDeltaTime) { +template +void HGCGraphT::makeAndConnectDoublets(const TILES &histo, + const std::vector ®ions, + int nEtaBins, + int nPhiBins, + const std::vector &layerClusters, + const std::vector &mask, + const edm::ValueMap> &layerClustersTime, + int deltaIEta, + int deltaIPhi, + float minCosTheta, + float minCosPointing, + float root_doublet_max_distance_from_seed_squared, + float etaLimitIncreaseWindow, + int skip_layers, + int maxNumberOfLayers, + float maxDeltaTime) { isOuterClusterOfDoublets_.clear(); isOuterClusterOfDoublets_.resize(layerClusters.size()); allDoublets_.clear(); theRootDoublets_.clear(); + bool checkDistanceRootDoubletVsSeed = root_doublet_max_distance_from_seed_squared < 9999; + float origin_eta; + float origin_phi; for (const auto &r : regions) { bool isGlobal = (r.index == -1); auto zSide = r.zSide; @@ -36,19 +41,22 @@ void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, startPhiBin = 0; endEtaBin = nEtaBins; endPhiBin = nPhiBins; + origin_eta = 0; + origin_phi = 0; } else { auto firstLayerOnZSide = maxNumberOfLayers * zSide; const auto &firstLayerHisto = histo[firstLayerOnZSide]; - - int entryEtaBin = firstLayerHisto.etaBin(r.origin.eta()); - int entryPhiBin = firstLayerHisto.phiBin(r.origin.phi()); + origin_eta = r.origin.eta(); + origin_phi = r.origin.phi(); + int entryEtaBin = firstLayerHisto.etaBin(origin_eta); + int entryPhiBin = firstLayerHisto.phiBin(origin_phi); // For track-seeded iterations, if the impact point is below a certain // eta-threshold, i.e., it has higher eta, make the initial search // window bigger in both eta and phi by one bin, to contain better low // energy showers. 
auto etaWindow = deltaIEta; auto phiWindow = deltaIPhi; - if (std::abs(r.origin.eta()) > etaLimitIncreaseWindow) { + if (std::abs(origin_eta) > etaLimitIncreaseWindow) { etaWindow++; phiWindow++; LogDebug("HGCGraph") << "Limit of Eta for increase: " << etaLimitIncreaseWindow @@ -59,9 +67,9 @@ void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, startPhiBin = entryPhiBin - phiWindow; endPhiBin = entryPhiBin + phiWindow + 1; if (verbosity_ > Guru) { - LogDebug("HGCGraph") << " Entrance eta, phi: " << r.origin.eta() << ", " << r.origin.phi() + LogDebug("HGCGraph") << " Entrance eta, phi: " << origin_eta << ", " << origin_phi << " entryEtaBin: " << entryEtaBin << " entryPhiBin: " << entryPhiBin - << " globalBin: " << firstLayerHisto.globalBin(r.origin.eta(), r.origin.phi()) + << " globalBin: " << firstLayerHisto.globalBin(origin_eta, origin_phi) << " on layer: " << firstLayerOnZSide << " startEtaBin: " << startEtaBin << " endEtaBin: " << endEtaBin << " startPhiBin: " << startPhiBin << " endPhiBin: " << endPhiBin << " phiBin(0): " << firstLayerHisto.phiBin(0.) @@ -74,7 +82,7 @@ void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, } for (int il = 0; il < maxNumberOfLayers - 1; ++il) { - for (int outer_layer = 0; outer_layer < std::min(1 + missing_layers, maxNumberOfLayers - 1 - il); ++outer_layer) { + for (int outer_layer = 0; outer_layer < std::min(1 + skip_layers, maxNumberOfLayers - 1 - il); ++outer_layer) { int currentInnerLayerId = il + maxNumberOfLayers * zSide; int currentOuterLayerId = currentInnerLayerId + 1 + outer_layer; auto const &outerLayerHisto = histo[currentOuterLayerId]; @@ -170,8 +178,17 @@ void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, minCosTheta, minCosPointing, verbosity_ > Advanced); - if (isRootDoublet) + if (isRootDoublet and checkDistanceRootDoubletVsSeed) { + auto dEtaSquared = (layerClusters[innerClusterId].eta() - origin_eta); + dEtaSquared *= dEtaSquared; + auto dPhiSquared = (layerClusters[innerClusterId].phi() - origin_phi); + dPhiSquared *= dPhiSquared; + if (dEtaSquared + dPhiSquared > root_doublet_max_distance_from_seed_squared) + isRootDoublet = false; + } + if (isRootDoublet) { theRootDoublets_.push_back(doubletId); + } } } } @@ -189,10 +206,11 @@ void HGCGraph::makeAndConnectDoublets(const TICLLayerTiles &histo, // #endif } -bool HGCGraph::areTimeCompatible(int innerIdx, - int outerIdx, - const edm::ValueMap> &layerClustersTime, - float maxDeltaTime) { +template +bool HGCGraphT::areTimeCompatible(int innerIdx, + int outerIdx, + const edm::ValueMap> &layerClustersTime, + float maxDeltaTime) { float timeIn = layerClustersTime.get(innerIdx).first; float timeInE = layerClustersTime.get(innerIdx).second; float timeOut = layerClustersTime.get(outerIdx).first; @@ -203,11 +221,12 @@ bool HGCGraph::areTimeCompatible(int innerIdx, } //also return a vector of seedIndex for the reconstructed tracksters -void HGCGraph::findNtuplets(std::vector &foundNtuplets, - std::vector &seedIndices, - const unsigned int minClustersPerNtuplet, - const bool outInDFS, - unsigned int maxOutInHops) { +template +void HGCGraphT::findNtuplets(std::vector &foundNtuplets, + std::vector &seedIndices, + const unsigned int minClustersPerNtuplet, + const bool outInDFS, + unsigned int maxOutInHops) { HGCDoublet::HGCntuplet tmpNtuplet; tmpNtuplet.reserve(minClustersPerNtuplet); std::vector> outInToVisit; @@ -230,3 +249,6 @@ void HGCGraph::findNtuplets(std::vector &foundNtuplets, } } } + +template class HGCGraphT; +template class HGCGraphT; diff --git 
a/RecoHGCal/TICL/plugins/HGCGraph.h b/RecoHGCal/TICL/plugins/HGCGraph.h index 8f0a5fb3bedff..f8cc626016f40 100644 --- a/RecoHGCal/TICL/plugins/HGCGraph.h +++ b/RecoHGCal/TICL/plugins/HGCGraph.h @@ -11,9 +11,10 @@ #include "DataFormats/HGCalReco/interface/TICLSeedingRegion.h" #include "HGCDoublet.h" -class HGCGraph { +template +class HGCGraphT { public: - void makeAndConnectDoublets(const TICLLayerTiles &h, + void makeAndConnectDoublets(const TILES &h, const std::vector ®ions, int nEtaBins, int nPhiBins, @@ -24,8 +25,9 @@ class HGCGraph { int deltaIPhi, float minCosThetai, float maxCosPointing, + float root_doublet_max_distance_from_seed_squared, float etaLimitIncreaseWindow, - int missing_layers, + int skip_layers, int maxNumberOfLayers, float maxDeltaTime); diff --git a/RecoHGCal/TICL/plugins/MultiClustersFromTrackstersProducer.cc b/RecoHGCal/TICL/plugins/MultiClustersFromTrackstersProducer.cc index 195843d0015e8..27cf92d6c1265 100644 --- a/RecoHGCal/TICL/plugins/MultiClustersFromTrackstersProducer.cc +++ b/RecoHGCal/TICL/plugins/MultiClustersFromTrackstersProducer.cc @@ -63,13 +63,14 @@ void MultiClustersFromTrackstersProducer::produce(edm::Event& evt, const edm::Ev } std::for_each(std::begin(tracksters), std::end(tracksters), [&](auto const& trackster) { - // Do not create a multicluster if the trackster has no layer clusters. + // Create an empty multicluster if the trackster has no layer clusters. // This could happen when a seed leads to no trackster and a dummy one is produced. + + std::array baricenter{{0., 0., 0.}}; + double total_weight = 0.; + reco::HGCalMultiCluster temp; + int counter = 0; if (!trackster.vertices().empty()) { - std::array baricenter{{0., 0., 0.}}; - double total_weight = 0.; - reco::HGCalMultiCluster temp; - int counter = 0; std::for_each(std::begin(trackster.vertices()), std::end(trackster.vertices()), [&](unsigned int idx) { temp.push_back(clusterPtrs[idx]); auto fraction = 1.f / trackster.vertex_multiplicity(counter++); @@ -86,13 +87,13 @@ void MultiClustersFromTrackstersProducer::produce(edm::Event& evt, const edm::Ev std::begin(baricenter), std::end(baricenter), std::begin(baricenter), [&total_weight](double val) -> double { return val / total_weight; }); - temp.setEnergy(total_weight); - temp.setCorrectedEnergy(total_weight); - temp.setPosition(math::XYZPoint(baricenter[0], baricenter[1], baricenter[2])); - temp.setAlgoId(reco::CaloCluster::hgcal_em); - temp.setTime(trackster.time(), trackster.timeError()); - multiclusters->push_back(temp); } + temp.setEnergy(total_weight); + temp.setCorrectedEnergy(total_weight); + temp.setPosition(math::XYZPoint(baricenter[0], baricenter[1], baricenter[2])); + temp.setAlgoId(reco::CaloCluster::hgcal_em); + temp.setTime(trackster.time(), trackster.timeError()); + multiclusters->push_back(temp); }); evt.put(std::move(multiclusters)); diff --git a/RecoHGCal/TICL/plugins/PFTICLProducer.cc b/RecoHGCal/TICL/plugins/PFTICLProducer.cc index 443340944b39b..1c9f9b35035ed 100644 --- a/RecoHGCal/TICL/plugins/PFTICLProducer.cc +++ b/RecoHGCal/TICL/plugins/PFTICLProducer.cc @@ -36,7 +36,7 @@ PFTICLProducer::PFTICLProducer(const edm::ParameterSet& conf) void PFTICLProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { edm::ParameterSetDescription desc; - desc.add("ticlCandidateSrc", edm::InputTag("ticlCandidateFromTracksters")); + desc.add("ticlCandidateSrc", edm::InputTag("ticlTrackstersMerge")); descriptions.add("pfTICLProducer", desc); } @@ -52,6 +52,15 @@ void PFTICLProducer::produce(edm::StreamID, edm::Event& 
evt, const edm::EventSet const auto abs_pdg_id = std::abs(ticl_cand.pdgId()); const auto charge = ticl_cand.charge(); const auto& four_mom = ticl_cand.p4(); + double ecal_energy = 0.; + + for (const auto& t : ticl_cand.tracksters()) { + double ecal_energy_fraction = t->raw_em_pt() / t->raw_pt(); + ecal_energy += t->raw_energy() * ecal_energy_fraction; + } + double hcal_energy = ticl_cand.rawEnergy() - ecal_energy; + // fix for floating point rounding could go slightly below 0 + hcal_energy = hcal_energy < 0 ? 0 : hcal_energy; reco::PFCandidate::ParticleType part_type; switch (abs_pdg_id) { @@ -78,6 +87,8 @@ void PFTICLProducer::produce(edm::StreamID, edm::Event& evt, const edm::EventSet candidates->emplace_back(charge, four_mom, part_type); auto& candidate = candidates->back(); + candidate.setEcalEnergy(ecal_energy, ecal_energy); + candidate.setHcalEnergy(hcal_energy, hcal_energy); if (candidate.charge()) { // otherwise PFCandidate throws // Construct edm::Ref from edm::Ptr. As of now, assumes type to be reco::Track. To be extended (either via // dynamic type checking or configuration) if additional track types are needed. diff --git a/RecoHGCal/TICL/plugins/PatternRecognitionAlgoBase.h b/RecoHGCal/TICL/plugins/PatternRecognitionAlgoBase.h index e9e67e7d5c959..23edaac568a6f 100644 --- a/RecoHGCal/TICL/plugins/PatternRecognitionAlgoBase.h +++ b/RecoHGCal/TICL/plugins/PatternRecognitionAlgoBase.h @@ -7,7 +7,6 @@ #include #include #include "DataFormats/CaloRecHit/interface/CaloCluster.h" -#include "DataFormats/HGCalReco/interface/Common.h" #include "DataFormats/HGCalReco/interface/Trackster.h" #include "DataFormats/HGCalReco/interface/TICLLayerTile.h" #include "DataFormats/HGCalReco/interface/TICLSeedingRegion.h" @@ -21,11 +20,12 @@ namespace edm { } // namespace edm namespace ticl { - class PatternRecognitionAlgoBase { + template + class PatternRecognitionAlgoBaseT { public: - PatternRecognitionAlgoBase(const edm::ParameterSet& conf, const CacheBase* cache) + PatternRecognitionAlgoBaseT(const edm::ParameterSet& conf, const CacheBase* cache) : algo_verbosity_(conf.getParameter("algo_verbosity")) {} - virtual ~PatternRecognitionAlgoBase(){}; + virtual ~PatternRecognitionAlgoBaseT(){}; struct Inputs { const edm::Event& ev; @@ -33,7 +33,7 @@ namespace ticl { const std::vector& layerClusters; const std::vector& mask; const edm::ValueMap>& layerClustersTime; - const TICLLayerTiles& tiles; + const TILES& tiles; const std::vector& regions; Inputs(const edm::Event& eV, @@ -41,7 +41,7 @@ namespace ticl { const std::vector& lC, const std::vector& mS, const edm::ValueMap>& lT, - const TICLLayerTiles& tL, + const TILES& tL, const std::vector& rG) : ev(eV), es(eS), layerClusters(lC), mask(mS), layerClustersTime(lT), tiles(tL), regions(rG) {} }; diff --git a/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.cc b/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.cc index a782a56787efc..8340aa261fea8 100644 --- a/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.cc +++ b/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.cc @@ -7,25 +7,38 @@ #include "FWCore/MessageLogger/interface/MessageLogger.h" #include "FWCore/Utilities/interface/Exception.h" #include "PatternRecognitionbyCA.h" -#include "HGCGraph.h" #include "RecoLocalCalo/HGCalRecProducers/interface/ComputeClusterTime.h" #include "TrackstersPCA.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" +#include "FWCore/Framework/interface/EventSetup.h" using namespace ticl; 
-PatternRecognitionbyCA::PatternRecognitionbyCA(const edm::ParameterSet &conf, const CacheBase *cache) - : PatternRecognitionAlgoBase(conf, cache), - theGraph_(std::make_unique()), +template +PatternRecognitionbyCA::PatternRecognitionbyCA(const edm::ParameterSet &conf, const CacheBase *cache) + : PatternRecognitionAlgoBaseT(conf, cache), + theGraph_(std::make_unique>()), oneTracksterPerTrackSeed_(conf.getParameter("oneTracksterPerTrackSeed")), promoteEmptyRegionToTrackster_(conf.getParameter("promoteEmptyRegionToTrackster")), out_in_dfs_(conf.getParameter("out_in_dfs")), max_out_in_hops_(conf.getParameter("max_out_in_hops")), min_cos_theta_(conf.getParameter("min_cos_theta")), min_cos_pointing_(conf.getParameter("min_cos_pointing")), + root_doublet_max_distance_from_seed_squared_( + conf.getParameter("root_doublet_max_distance_from_seed_squared")), etaLimitIncreaseWindow_(conf.getParameter("etaLimitIncreaseWindow")), - missing_layers_(conf.getParameter("missing_layers")), - min_clusters_per_ntuplet_(conf.getParameter("min_clusters_per_ntuplet")), + skip_layers_(conf.getParameter("skip_layers")), + max_missing_layers_in_trackster_(conf.getParameter("max_missing_layers_in_trackster")), + check_missing_layers_(max_missing_layers_in_trackster_ < 100), + shower_start_max_layer_(conf.getParameter("shower_start_max_layer")), + min_layers_per_trackster_(conf.getParameter("min_layers_per_trackster")), + filter_on_categories_(conf.getParameter>("filter_on_categories")), + pid_threshold_(conf.getParameter("pid_threshold")), + energy_em_over_total_threshold_(conf.getParameter("energy_em_over_total_threshold")), + max_longitudinal_sigmaPCA_(conf.getParameter("max_longitudinal_sigmaPCA")), + min_clusters_per_ntuplet_(min_layers_per_trackster_), max_delta_time_(conf.getParameter("max_delta_time")), eidInputName_(conf.getParameter("eid_input_name")), eidOutputNameEnergy_(conf.getParameter("eid_output_name_energy")), @@ -43,31 +56,41 @@ PatternRecognitionbyCA::PatternRecognitionbyCA(const edm::ParameterSet &conf, co eidSession_ = tensorflow::createSession(trackstersCache->eidGraphDef); } -PatternRecognitionbyCA::~PatternRecognitionbyCA(){}; +template +PatternRecognitionbyCA::~PatternRecognitionbyCA(){}; -void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::Inputs &input, - std::vector &result, - std::unordered_map> &seedToTracksterAssociation) { +template +void PatternRecognitionbyCA::makeTracksters( + const typename PatternRecognitionAlgoBaseT::Inputs &input, + std::vector &result, + std::unordered_map> &seedToTracksterAssociation) { // Protect from events with no seeding regions if (input.regions.empty()) return; - rhtools_.getEventSetup(input.es); + edm::ESHandle geom; + edm::EventSetup const &es = input.es; + es.get().get(geom); + rhtools_.setGeometry(*geom); - theGraph_->setVerbosity(algo_verbosity_); + theGraph_->setVerbosity(PatternRecognitionAlgoBaseT::algo_verbosity_); theGraph_->clear(); - if (algo_verbosity_ > None) { + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::None) { LogDebug("HGCPatternRecoByCA") << "Making Tracksters with CA" << std::endl; } + int type = input.tiles[0].typeT(); + int nEtaBin = (type == 1) ? ticl::TileConstantsHFNose::nEtaBins : ticl::TileConstants::nEtaBins; + int nPhiBin = (type == 1) ? 
ticl::TileConstantsHFNose::nPhiBins : ticl::TileConstants::nPhiBins; + bool isRegionalIter = (input.regions[0].index != -1); std::vector foundNtuplets; std::vector seedIndices; std::vector layer_cluster_usage(input.layerClusters.size(), 0); theGraph_->makeAndConnectDoublets(input.tiles, input.regions, - ticl::TileConstants::nEtaBins, - ticl::TileConstants::nPhiBins, + nEtaBin, + nPhiBin, input.layerClusters, input.mask, input.layerClustersTime, @@ -75,17 +98,24 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In 1, min_cos_theta_, min_cos_pointing_, + root_doublet_max_distance_from_seed_squared_, etaLimitIncreaseWindow_, - missing_layers_, - rhtools_.lastLayerFH(), + skip_layers_, + rhtools_.lastLayer(type), max_delta_time_); theGraph_->findNtuplets(foundNtuplets, seedIndices, min_clusters_per_ntuplet_, out_in_dfs_, max_out_in_hops_); //#ifdef FP_DEBUG const auto &doublets = theGraph_->getAllDoublets(); - int tracksterId = 0; + int tracksterId = -1; + + // container for holding tracksters before selection + std::vector tmpTracksters; + tmpTracksters.reserve(foundNtuplets.size()); for (auto const &ntuplet : foundNtuplets) { + tracksterId++; + std::set effective_cluster_idx; std::pair::iterator, bool> retVal; @@ -114,7 +144,7 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In } } - if (algo_verbosity_ > Advanced) { + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::Advanced) { LogDebug("HGCPatternRecoByCA") << " New doublet " << doublet << " for trackster: " << result.size() << " InnerCl " << innerCluster << " " << input.layerClusters[innerCluster].x() << " " << input.layerClusters[innerCluster].y() << " " @@ -124,41 +154,113 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In << input.layerClusters[outerCluster].z() << " " << tracksterId << std::endl; } } + unsigned showerMinLayerId = 99999; + std::vector uniqueLayerIds; + uniqueLayerIds.reserve(effective_cluster_idx.size()); + std::vector> lcIdAndLayer; + lcIdAndLayer.reserve(effective_cluster_idx.size()); for (auto const i : effective_cluster_idx) { - layer_cluster_usage[i]++; - if (algo_verbosity_ > Basic) - LogDebug("HGCPatternRecoByCA") << "LayerID: " << i << " count: " << (int)layer_cluster_usage[i] << std::endl; + auto const &haf = input.layerClusters[i].hitsAndFractions(); + auto layerId = rhtools_.getLayerWithOffset(haf[0].first); + showerMinLayerId = std::min(layerId, showerMinLayerId); + uniqueLayerIds.push_back(layerId); + lcIdAndLayer.emplace_back(i, layerId); } - // Put back indices, in the form of a Trackster, into the results vector - Trackster tmp; - tmp.vertices().reserve(effective_cluster_idx.size()); - tmp.vertex_multiplicity().resize(effective_cluster_idx.size(), 0); - //regions and seedIndices can have different size - //if a seeding region does not lead to any trackster - tmp.setSeed(input.regions[0].collectionID, seedIndices[tracksterId]); - if (isRegionalIter) { - seedToTracksterAssociation[tmp.seedIndex()].push_back(tracksterId); + std::sort(uniqueLayerIds.begin(), uniqueLayerIds.end()); + uniqueLayerIds.erase(std::unique(uniqueLayerIds.begin(), uniqueLayerIds.end()), uniqueLayerIds.end()); + unsigned int numberOfLayersInTrackster = uniqueLayerIds.size(); + if (check_missing_layers_) { + int numberOfMissingLayers = 0; + unsigned int j = showerMinLayerId; + unsigned int indexInVec = 0; + for (const auto &layer : uniqueLayerIds) { + if (layer != j) { + numberOfMissingLayers++; + j++; + if 
(numberOfMissingLayers > max_missing_layers_in_trackster_) { + numberOfLayersInTrackster = indexInVec; + for (auto &llpair : lcIdAndLayer) { + if (llpair.second >= layer) { + effective_cluster_idx.erase(llpair.first); + } + } + break; + } + } + indexInVec++; + j++; + } } - std::pair timeTrackster(-99., -1.); - hgcalsimclustertime::ComputeClusterTime timeEstimator; - timeTrackster = timeEstimator.fixSizeHighestDensity(times, timeErrors); - tmp.setTimeAndError(timeTrackster.first, timeTrackster.second); - std::copy(std::begin(effective_cluster_idx), std::end(effective_cluster_idx), std::back_inserter(tmp.vertices())); - result.push_back(tmp); - tracksterId++; + if ((numberOfLayersInTrackster >= min_layers_per_trackster_) and (showerMinLayerId <= shower_start_max_layer_)) { + // Put back indices, in the form of a Trackster, into the results vector + Trackster tmp; + tmp.vertices().reserve(effective_cluster_idx.size()); + tmp.vertex_multiplicity().resize(effective_cluster_idx.size(), 1); + //regions and seedIndices can have different size + //if a seeding region does not lead to any trackster + tmp.setSeed(input.regions[0].collectionID, seedIndices[tracksterId]); + + std::pair timeTrackster(-99., -1.); + hgcalsimclustertime::ComputeClusterTime timeEstimator; + timeTrackster = timeEstimator.fixSizeHighestDensity(times, timeErrors); + tmp.setTimeAndError(timeTrackster.first, timeTrackster.second); + std::copy(std::begin(effective_cluster_idx), std::end(effective_cluster_idx), std::back_inserter(tmp.vertices())); + tmpTracksters.push_back(tmp); + } + } + ticl::assignPCAtoTracksters( + tmpTracksters, input.layerClusters, rhtools_.getPositionLayer(rhtools_.lastLayerEE(type)).z()); + + // run energy regression and ID + energyRegressionAndID(input.layerClusters, tmpTracksters); + // Filter results based on PID criteria or EM/Total energy ratio. + // We want to **keep** tracksters whose cumulative + // probability summed up over the selected categories + // is greater than the chosen threshold. Therefore + // the filtering function should **discard** all + // tracksters **below** the threshold. 
+ auto filter_on_pids = [&](Trackster &t) -> bool { + auto cumulative_prob = 0.; + for (auto index : filter_on_categories_) { + cumulative_prob += t.id_probabilities(index); + } + return (cumulative_prob <= pid_threshold_) && + (t.raw_em_energy() < energy_em_over_total_threshold_ * t.raw_energy()); + }; + + std::vector selectedTrackstersIds; + for (unsigned i = 0; i < tmpTracksters.size(); ++i) { + if (!filter_on_pids(tmpTracksters[i]) and tmpTracksters[i].sigmasPCA()[0] < max_longitudinal_sigmaPCA_) { + selectedTrackstersIds.push_back(i); + } + } + + result.reserve(selectedTrackstersIds.size()); + + for (unsigned i = 0; i < selectedTrackstersIds.size(); ++i) { + const auto &t = tmpTracksters[selectedTrackstersIds[i]]; + for (auto const lcId : t.vertices()) { + layer_cluster_usage[lcId]++; + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::Basic) + LogDebug("HGCPatternRecoByCA") << "LayerID: " << lcId << " count: " << (int)layer_cluster_usage[lcId] + << std::endl; + } + if (isRegionalIter) { + seedToTracksterAssociation[t.seedIndex()].push_back(i); + } + result.push_back(t); } for (auto &trackster : result) { assert(trackster.vertices().size() <= trackster.vertex_multiplicity().size()); for (size_t i = 0; i < trackster.vertices().size(); ++i) { trackster.vertex_multiplicity()[i] = layer_cluster_usage[trackster.vertices(i)]; - if (algo_verbosity_ > Basic) + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::Basic) LogDebug("HGCPatternRecoByCA") << "LayerID: " << trackster.vertices(i) << " count: " << (int)trackster.vertex_multiplicity(i) << std::endl; } } - // Now decide if the tracksters from the track-based iterations have to be merged if (oneTracksterPerTrackSeed_) { std::vector tmp; @@ -166,8 +268,7 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In tmp.swap(result); } - ticl::assignPCAtoTracksters(result, input.layerClusters, rhtools_.getPositionLayer(rhtools_.lastLayerEE()).z()); - + ticl::assignPCAtoTracksters(result, input.layerClusters, rhtools_.getPositionLayer(rhtools_.lastLayerEE(type)).z()); // run energy regression and ID energyRegressionAndID(input.layerClusters, result); @@ -177,7 +278,7 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In emptyTrackstersFromSeedsTRK(result, seedToTracksterAssociation, input.regions[0].collectionID); } - if (algo_verbosity_ > Advanced) { + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::Advanced) { for (auto &trackster : result) { LogDebug("HGCPatternRecoByCA") << "Trackster characteristics: " << std::endl; LogDebug("HGCPatternRecoByCA") << "Size: " << trackster.vertices().size() << std::endl; @@ -190,7 +291,8 @@ void PatternRecognitionbyCA::makeTracksters(const PatternRecognitionAlgoBase::In theGraph_->clear(); } -void PatternRecognitionbyCA::mergeTrackstersTRK( +template +void PatternRecognitionbyCA::mergeTrackstersTRK( const std::vector &input, const std::vector &layerClusters, std::vector &output, @@ -207,7 +309,7 @@ void PatternRecognitionbyCA::mergeTrackstersTRK( for (unsigned int j = 1; j < numberOfTrackstersInSeed; ++j) { auto &thisTrackster = input[tracksters[j]]; updated_size += thisTrackster.vertices().size(); - if (algo_verbosity_ > Basic) { + if (PatternRecognitionAlgoBaseT::algo_verbosity_ > PatternRecognitionAlgoBaseT::Basic) { LogDebug("HGCPatternRecoByCA") << "Updated size: " << updated_size << std::endl; } outTrackster.vertices().reserve(updated_size); @@ -225,7 +327,8 
@@ void PatternRecognitionbyCA::mergeTrackstersTRK( output.shrink_to_fit(); } -void PatternRecognitionbyCA::emptyTrackstersFromSeedsTRK( +template +void PatternRecognitionbyCA::emptyTrackstersFromSeedsTRK( std::vector &tracksters, std::unordered_map> &seedToTracksterAssociation, const edm::ProductID &collectionID) const { @@ -242,8 +345,9 @@ void PatternRecognitionbyCA::emptyTrackstersFromSeedsTRK( } } -void PatternRecognitionbyCA::energyRegressionAndID(const std::vector &layerClusters, - std::vector &tracksters) { +template +void PatternRecognitionbyCA::energyRegressionAndID(const std::vector &layerClusters, + std::vector &tracksters) { // Energy regression and particle identification strategy: // // 1. Set default values for regressed energy and particle id for each trackster. @@ -379,3 +483,6 @@ void PatternRecognitionbyCA::energyRegressionAndID(const std::vector; +template class ticl::PatternRecognitionbyCA; diff --git a/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.h b/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.h index c39f21d9ec40b..c337dd344ec78 100644 --- a/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.h +++ b/RecoHGCal/TICL/plugins/PatternRecognitionbyCA.h @@ -7,16 +7,16 @@ #include "RecoHGCal/TICL/plugins/PatternRecognitionAlgoBase.h" #include "RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h" #include "PhysicsTools/TensorFlow/interface/TensorFlow.h" - -class HGCGraph; +#include "HGCGraph.h" namespace ticl { - class PatternRecognitionbyCA final : public PatternRecognitionAlgoBase { + template + class PatternRecognitionbyCA final : public PatternRecognitionAlgoBaseT { public: PatternRecognitionbyCA(const edm::ParameterSet& conf, const CacheBase* cache); ~PatternRecognitionbyCA() override; - void makeTracksters(const PatternRecognitionAlgoBase::Inputs& input, + void makeTracksters(const typename PatternRecognitionAlgoBaseT::Inputs& input, std::vector& result, std::unordered_map>& seedToTracksterAssociation) override; @@ -30,15 +30,24 @@ namespace ticl { const std::vector&, std::vector&, std::unordered_map>& seedToTracksterAssociation) const; - const std::unique_ptr theGraph_; + const std::unique_ptr> theGraph_; const bool oneTracksterPerTrackSeed_; const bool promoteEmptyRegionToTrackster_; const bool out_in_dfs_; const unsigned int max_out_in_hops_; const float min_cos_theta_; const float min_cos_pointing_; + const float root_doublet_max_distance_from_seed_squared_; const float etaLimitIncreaseWindow_; - const int missing_layers_; + const int skip_layers_; + const int max_missing_layers_in_trackster_; + bool check_missing_layers_ = false; + const unsigned int shower_start_max_layer_; + const unsigned int min_layers_per_trackster_; + const std::vector filter_on_categories_; + const double pid_threshold_; + const double energy_em_over_total_threshold_; + const double max_longitudinal_sigmaPCA_; const int min_clusters_per_ntuplet_; const float max_delta_time_; const std::string eidInputName_; @@ -53,5 +62,6 @@ namespace ticl { static const int eidNFeatures_ = 3; }; + } // namespace ticl #endif diff --git a/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.cc b/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.cc index 59159b49ec568..68f303b5a0bc3 100644 --- a/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.cc +++ b/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.cc @@ -1,8 +1,9 @@ #include "PatternRecognitionbyMultiClusters.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" -void ticl::PatternRecognitionbyMultiClusters::makeTracksters( 
- const PatternRecognitionAlgoBase::Inputs& input, +template +void ticl::PatternRecognitionbyMultiClusters::makeTracksters( + const typename PatternRecognitionAlgoBaseT::Inputs& input, std::vector& result, std::unordered_map>& seedToTracksterAssociation) { LogDebug("HGCPatterRecoTrackster") << "making Tracksters" << std::endl; diff --git a/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.h b/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.h index d787a6002397d..027cebb8c3033 100644 --- a/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.h +++ b/RecoHGCal/TICL/plugins/PatternRecognitionbyMultiClusters.h @@ -14,13 +14,14 @@ namespace edm { } // namespace edm namespace ticl { - class PatternRecognitionbyMultiClusters final : public PatternRecognitionAlgoBase { + template + class PatternRecognitionbyMultiClusters final : public PatternRecognitionAlgoBaseT { public: PatternRecognitionbyMultiClusters(const edm::ParameterSet& conf, const CacheBase* cache) - : PatternRecognitionAlgoBase(conf, cache) {} + : PatternRecognitionAlgoBaseT(conf, cache) {} ~PatternRecognitionbyMultiClusters() override{}; - void makeTracksters(const PatternRecognitionAlgoBase::Inputs& input, + void makeTracksters(const typename PatternRecognitionAlgoBaseT::Inputs& input, std::vector& result, std::unordered_map>& seedToTracksterAssociation) override; }; diff --git a/RecoHGCal/TICL/plugins/SeedingRegionByTracks.cc b/RecoHGCal/TICL/plugins/SeedingRegionByTracks.cc index e384db9b7fb8e..16d69c391f38a 100644 --- a/RecoHGCal/TICL/plugins/SeedingRegionByTracks.cc +++ b/RecoHGCal/TICL/plugins/SeedingRegionByTracks.cc @@ -17,19 +17,23 @@ SeedingRegionByTracks::SeedingRegionByTracks(const edm::ParameterSet &conf, edm: : SeedingRegionAlgoBase(conf, sumes), tracks_token_(sumes.consumes(conf.getParameter("tracks"))), cutTk_(conf.getParameter("cutTk")), - propName_(conf.getParameter("propagator")) {} + propName_(conf.getParameter("propagator")), + hdc_token_(sumes.esConsumes( + edm::ESInputTag("", detectorName_))), + bfield_token_(sumes.esConsumes()), + propagator_token_(sumes.esConsumes( + edm::ESInputTag("", propName_))) {} SeedingRegionByTracks::~SeedingRegionByTracks() {} void SeedingRegionByTracks::initialize(const edm::EventSetup &es) { - edm::ESHandle hdc; - es.get().get(detectorName_, hdc); + edm::ESHandle hdc = es.getHandle(hdc_token_); hgcons_ = hdc.product(); buildFirstLayers(); - es.get().get(bfield_); - es.get().get(propName_, propagator_); + bfield_ = es.getHandle(bfield_token_); + propagator_ = es.getHandle(propagator_token_); } void SeedingRegionByTracks::makeRegions(const edm::Event &ev, @@ -55,6 +59,10 @@ void SeedingRegionByTracks::makeRegions(const edm::Event &ev, result.emplace_back(tsos.globalPosition(), tsos.globalMomentum(), iSide, i, trkId); } } + // sorting seeding region by descending momentum + std::sort(result.begin(), result.end(), [](const TICLSeedingRegion &a, const TICLSeedingRegion &b) { + return a.directionAtOrigin.perp2() > b.directionAtOrigin.perp2(); + }); } void SeedingRegionByTracks::buildFirstLayers() { diff --git a/RecoHGCal/TICL/plugins/SeedingRegionByTracks.h b/RecoHGCal/TICL/plugins/SeedingRegionByTracks.h index a0b56e22e728c..d8d9f61214c02 100644 --- a/RecoHGCal/TICL/plugins/SeedingRegionByTracks.h +++ b/RecoHGCal/TICL/plugins/SeedingRegionByTracks.h @@ -22,8 +22,10 @@ #include "TrackingTools/Records/interface/TrackingComponentsRecord.h" #include "Geometry/CommonDetUnit/interface/GeomDet.h" #include "CommonTools/Utils/interface/StringCutObjectSelector.h" - -class 
HGCGraph; +#include "FWCore/Utilities/interface/ESGetToken.h" +#include "Geometry/Records/interface/IdealGeometryRecord.h" +#include "MagneticField/Records/interface/IdealMagneticFieldRecord.h" +#include "TrackingTools/Records/interface/TrackingComponentsRecord.h" namespace ticl { class SeedingRegionByTracks final : public SeedingRegionAlgoBase { @@ -47,6 +49,9 @@ namespace ticl { const std::string propName_; edm::ESHandle bfield_; std::unique_ptr firstDisk_[2]; + edm::ESGetToken hdc_token_; + edm::ESGetToken bfield_token_; + edm::ESGetToken propagator_token_; }; } // namespace ticl #endif diff --git a/RecoHGCal/TICL/plugins/TICLLayerTileProducer.cc b/RecoHGCal/TICL/plugins/TICLLayerTileProducer.cc index 541f08354e404..e3b1c633eddbf 100644 --- a/RecoHGCal/TICL/plugins/TICLLayerTileProducer.cc +++ b/RecoHGCal/TICL/plugins/TICLLayerTileProducer.cc @@ -7,9 +7,9 @@ #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Utilities/interface/ESGetToken.h" #include "DataFormats/CaloRecHit/interface/CaloCluster.h" -#include "DataFormats/HGCalReco/interface/Common.h" #include "DataFormats/HGCalReco/interface/TICLLayerTile.h" #include "RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h" @@ -24,41 +24,72 @@ class TICLLayerTileProducer : public edm::stream::EDProducer<> { private: edm::EDGetTokenT> clusters_token_; + edm::EDGetTokenT> clusters_HFNose_token_; + edm::ESGetToken geometry_token_; hgcal::RecHitTools rhtools_; + std::string detector_; + bool doNose_; }; -TICLLayerTileProducer::TICLLayerTileProducer(const edm::ParameterSet &ps) { +TICLLayerTileProducer::TICLLayerTileProducer(const edm::ParameterSet &ps) + : detector_(ps.getParameter("detector")) { + clusters_HFNose_token_ = + consumes>(ps.getParameter("layer_HFNose_clusters")); clusters_token_ = consumes>(ps.getParameter("layer_clusters")); + geometry_token_ = esConsumes(); - produces(); + doNose_ = (detector_ == "HFNose"); + + if (doNose_) + produces(); + else + produces(); } -void TICLLayerTileProducer::beginRun(edm::Run const &, edm::EventSetup const &es) { rhtools_.getEventSetup(es); } +void TICLLayerTileProducer::beginRun(edm::Run const &, edm::EventSetup const &es) { + edm::ESHandle geom = es.getHandle(geometry_token_); + rhtools_.setGeometry(*geom); +} void TICLLayerTileProducer::produce(edm::Event &evt, const edm::EventSetup &) { auto result = std::make_unique(); + auto resultHFNose = std::make_unique(); edm::Handle> cluster_h; - evt.getByToken(clusters_token_, cluster_h); + if (doNose_) + evt.getByToken(clusters_HFNose_token_, cluster_h); + else + evt.getByToken(clusters_token_, cluster_h); + const auto &layerClusters = *cluster_h; int lcId = 0; for (auto const &lc : layerClusters) { const auto firstHitDetId = lc.hitsAndFractions()[0].first; int layer = rhtools_.getLayerWithOffset(firstHitDetId) + - rhtools_.lastLayerFH() * ((rhtools_.zside(firstHitDetId) + 1) >> 1) - 1; + rhtools_.lastLayer(doNose_) * ((rhtools_.zside(firstHitDetId) + 1) >> 1) - 1; + assert(layer >= 0); - result->fill(layer, lc.eta(), lc.phi(), lcId); + + if (doNose_) + resultHFNose->fill(layer, lc.eta(), lc.phi(), lcId); + else + result->fill(layer, lc.eta(), lc.phi(), lcId); LogDebug("TICLLayerTileProducer") << "Adding layerClusterId: " << lcId << " into bin [eta,phi]: [ " << (*result)[layer].etaBin(lc.eta()) << ", " << (*result)[layer].phiBin(lc.phi()) << "] for layer: " << layer << std::endl; lcId++; } - 
evt.put(std::move(result)); + if (doNose_) + evt.put(std::move(resultHFNose)); + else + evt.put(std::move(result)); } void TICLLayerTileProducer::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { edm::ParameterSetDescription desc; + desc.add("detector", "HGCAL"); desc.add("layer_clusters", edm::InputTag("hgcalLayerClusters")); + desc.add("layer_HFNose_clusters", edm::InputTag("hgcalLayerClustersHFNose")); descriptions.add("ticlLayerTileProducer", desc); } diff --git a/RecoHGCal/TICL/plugins/TICLSeedingRegionProducer.cc b/RecoHGCal/TICL/plugins/TICLSeedingRegionProducer.cc index 152c7b7898eb4..b886b69aef8e0 100644 --- a/RecoHGCal/TICL/plugins/TICLSeedingRegionProducer.cc +++ b/RecoHGCal/TICL/plugins/TICLSeedingRegionProducer.cc @@ -59,7 +59,7 @@ void TICLSeedingRegionProducer::fillDescriptions(edm::ConfigurationDescriptions& desc.add("tracks", edm::InputTag("generalTracks")); desc.add("cutTk", "1.48 < abs(eta) < 3.0 && pt > 1. && quality(\"highPurity\") && " - "hitPattern().numberOfLostHits(\"MISSING_OUTER_HITS\") < 10"); + "hitPattern().numberOfLostHits(\"MISSING_OUTER_HITS\") < 5"); desc.add("propagator", "PropagatorWithMaterial"); desc.add("algoId", 1); descriptions.add("ticlSeedingRegionProducer", desc); diff --git a/RecoHGCal/TICL/plugins/TrackstersMergeProducer.cc b/RecoHGCal/TICL/plugins/TrackstersMergeProducer.cc index b0a2ec14a4427..03e36a7187786 100644 --- a/RecoHGCal/TICL/plugins/TrackstersMergeProducer.cc +++ b/RecoHGCal/TICL/plugins/TrackstersMergeProducer.cc @@ -1,9 +1,9 @@ #include // unique_ptr - #include "FWCore/Framework/interface/stream/EDProducer.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" +#include "FWCore/Utilities/interface/ESGetToken.h" #include "DataFormats/CaloRecHit/interface/CaloCluster.h" #include "DataFormats/HGCalReco/interface/Common.h" @@ -14,6 +14,7 @@ #include "RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h" #include "RecoHGCal/TICL/plugins/GlobalCache.h" #include "PhysicsTools/TensorFlow/interface/TensorFlow.h" +#include "DataFormats/HGCalReco/interface/TICLCandidate.h" #include "TrackstersPCA.h" @@ -32,29 +33,38 @@ class TrackstersMergeProducer : public edm::stream::EDProducer &, TracksterIterIndex); void printTrackstersDebug(const std::vector &, const char *label) const; void dumpTrackster(const Trackster &) const; + const edm::EDGetTokenT> tracksterstrkem_token_; const edm::EDGetTokenT> trackstersem_token_; const edm::EDGetTokenT> tracksterstrk_token_; const edm::EDGetTokenT> trackstershad_token_; const edm::EDGetTokenT> seedingTrk_token_; const edm::EDGetTokenT> clusters_token_; const edm::EDGetTokenT> tracks_token_; + const edm::ESGetToken geometry_token_; const bool optimiseAcrossTracksters_; const int eta_bin_window_; const int phi_bin_window_; const double pt_sigma_high_; const double pt_sigma_low_; + const double halo_max_distance2_; + const double track_min_pt_; + const double track_min_eta_; + const double track_max_eta_; + const int track_max_missing_outerhits_; const double cosangle_align_; const double e_over_h_threshold_; const double pt_neutral_threshold_; - const double resol_calo_offset_; - const double resol_calo_scale_; + const double resol_calo_offset_had_; + const double resol_calo_scale_had_; + const double resol_calo_offset_em_; + const double resol_calo_scale_em_; const bool debug_; const std::string eidInputName_; const std::string eidOutputNameEnergy_; @@ -70,22 +80,31 
@@ class TrackstersMergeProducer : public edm::stream::EDProducer>(ps.getParameter("trackstersem"))), + : tracksterstrkem_token_(consumes>(ps.getParameter("tracksterstrkem"))), + trackstersem_token_(consumes>(ps.getParameter("trackstersem"))), tracksterstrk_token_(consumes>(ps.getParameter("tracksterstrk"))), trackstershad_token_(consumes>(ps.getParameter("trackstershad"))), seedingTrk_token_(consumes>(ps.getParameter("seedingTrk"))), clusters_token_(consumes>(ps.getParameter("layer_clusters"))), tracks_token_(consumes>(ps.getParameter("tracks"))), + geometry_token_(esConsumes()), optimiseAcrossTracksters_(ps.getParameter("optimiseAcrossTracksters")), eta_bin_window_(ps.getParameter("eta_bin_window")), phi_bin_window_(ps.getParameter("phi_bin_window")), pt_sigma_high_(ps.getParameter("pt_sigma_high")), pt_sigma_low_(ps.getParameter("pt_sigma_low")), + halo_max_distance2_(ps.getParameter("halo_max_distance2")), + track_min_pt_(ps.getParameter("track_min_pt")), + track_min_eta_(ps.getParameter("track_min_eta")), + track_max_eta_(ps.getParameter("track_max_eta")), + track_max_missing_outerhits_(ps.getParameter("track_max_missing_outerhits")), cosangle_align_(ps.getParameter("cosangle_align")), e_over_h_threshold_(ps.getParameter("e_over_h_threshold")), pt_neutral_threshold_(ps.getParameter("pt_neutral_threshold")), - resol_calo_offset_(ps.getParameter("resol_calo_offset")), - resol_calo_scale_(ps.getParameter("resol_calo_scale")), + resol_calo_offset_had_(ps.getParameter("resol_calo_offset_had")), + resol_calo_scale_had_(ps.getParameter("resol_calo_scale_had")), + resol_calo_offset_em_(ps.getParameter("resol_calo_offset_em")), + resol_calo_scale_em_(ps.getParameter("resol_calo_scale_em")), debug_(ps.getParameter("debug")), eidInputName_(ps.getParameter("eid_input_name")), eidOutputNameEnergy_(ps.getParameter("eid_output_name_energy")), @@ -103,6 +122,7 @@ TrackstersMergeProducer::TrackstersMergeProducer(const edm::ParameterSet &ps, co eidSession_ = tensorflow::createSession(trackstersCache->eidGraphDef); produces>(); + produces>(); } void TrackstersMergeProducer::fillTile(TICLTracksterTiles &tracksterTile, @@ -143,16 +163,22 @@ void TrackstersMergeProducer::dumpTrackster(const Trackster &t) const { } void TrackstersMergeProducer::produce(edm::Event &evt, const edm::EventSetup &es) { - rhtools_.getEventSetup(es); - auto result = std::make_unique>(); - auto mergedTrackstersTRK = std::make_unique>(); + edm::ESHandle geom = es.getHandle(geometry_token_); + rhtools_.setGeometry(*geom); + auto resultTrackstersMerged = std::make_unique>(); + auto resultCandidates = std::make_unique>(); TICLTracksterTiles tracksterTile; - std::vector usedTrackstersEM; - std::vector usedTrackstersTRK; - std::vector usedTrackstersHAD; + std::vector usedTrackstersMerged; + std::vector indexInMergedCollTRKEM; + std::vector indexInMergedCollEM; + std::vector indexInMergedCollTRK; + std::vector indexInMergedCollHAD; std::vector usedSeeds; + // associating seed to the index of the trackster in the merged collection and the iteration that found it + std::map>> seedToTracksterAssociator; + std::vector iterMergedTracksters; edm::Handle> track_h; evt.getByToken(tracks_token_, track_h); const auto &tracks = *track_h; @@ -164,185 +190,345 @@ void TrackstersMergeProducer::produce(edm::Event &evt, const edm::EventSetup &es edm::Handle> trackstersem_h; evt.getByToken(trackstersem_token_, trackstersem_h); const auto &trackstersEM = *trackstersem_h; - usedTrackstersEM.resize(trackstersEM.size(), false); + + edm::Handle> 
tracksterstrkem_h; + evt.getByToken(tracksterstrkem_token_, tracksterstrkem_h); + const auto &trackstersTRKEM = *tracksterstrkem_h; edm::Handle> tracksterstrk_h; evt.getByToken(tracksterstrk_token_, tracksterstrk_h); const auto &trackstersTRK = *tracksterstrk_h; - usedTrackstersTRK.resize(trackstersTRK.size(), false); edm::Handle> trackstershad_h; evt.getByToken(trackstershad_token_, trackstershad_h); const auto &trackstersHAD = *trackstershad_h; - usedTrackstersHAD.resize(trackstersHAD.size(), false); edm::Handle> seedingTrk_h; evt.getByToken(seedingTrk_token_, seedingTrk_h); const auto &seedingTrk = *seedingTrk_h; - usedSeeds.resize(seedingTrk.size(), false); + usedSeeds.resize(tracks.size(), false); + fillTile(tracksterTile, trackstersTRKEM, TracksterIterIndex::TRKEM); fillTile(tracksterTile, trackstersEM, TracksterIterIndex::EM); fillTile(tracksterTile, trackstersTRK, TracksterIterIndex::TRK); fillTile(tracksterTile, trackstersHAD, TracksterIterIndex::HAD); - auto seedId = 0; - for (auto const &s : seedingTrk) { - tracksterTile.fill(TracksterIterIndex::SEED, s.origin.eta(), s.origin.phi(), seedId++); - - if (debug_) { - LogDebug("TrackstersMergeProducer") - << "Seed index: " << seedId << " internal index: " << s.index << " origin: " << s.origin - << " mom: " << s.directionAtOrigin << " pt: " << std::sqrt(s.directionAtOrigin.perp2()) - << " zSide: " << s.zSide << " collectionID: " << s.collectionID << " track pt " << tracks[s.index].pt() - << std::endl; - } - } + auto totalNumberOfTracksters = + trackstersTRKEM.size() + trackstersTRK.size() + trackstersEM.size() + trackstersHAD.size(); + resultTrackstersMerged->reserve(totalNumberOfTracksters); + iterMergedTracksters.reserve(totalNumberOfTracksters); + usedTrackstersMerged.resize(totalNumberOfTracksters, false); + indexInMergedCollTRKEM.reserve(trackstersTRKEM.size()); + indexInMergedCollEM.reserve(trackstersEM.size()); + indexInMergedCollTRK.reserve(trackstersTRK.size()); + indexInMergedCollHAD.reserve(trackstersHAD.size()); if (debug_) { - printTrackstersDebug(trackstersTRK, "tracksterTRK"); + printTrackstersDebug(trackstersTRKEM, "tracksterTRKEM"); printTrackstersDebug(trackstersEM, "tracksterEM"); + printTrackstersDebug(trackstersTRK, "tracksterTRK"); printTrackstersDebug(trackstersHAD, "tracksterHAD"); } - int tracksterTRK_idx = 0; - int tracksterHAD_idx = 0; - if (optimiseAcrossTracksters_) { - for (auto const &t : trackstersTRK) { - if (debug_) { - int entryEtaBin = tracksterTile[TracksterIterIndex::TRK].etaBin(t.barycenter().eta()); - int entryPhiBin = tracksterTile[TracksterIterIndex::TRK].phiBin(t.barycenter().phi()); - int bin = tracksterTile[TracksterIterIndex::TRK].globalBin(t.barycenter().eta(), t.barycenter().phi()); - LogDebug("TrackstersMergeProducer") - << "TrackstersMergeProducer Tracking obj: " << t.barycenter() << " in bin " << bin << " etaBin " - << entryEtaBin << " phiBin " << entryPhiBin << std::endl; - dumpTrackster(t); - } - auto const &track = tracks[t.seedIndex()]; - auto trk_pt = (float)track.pt(); - auto diff_pt = t.raw_pt() - trk_pt; - auto pt_err = trk_pt * resol_calo_scale_ + resol_calo_offset_; - auto w_cal = 1. / (pt_err * pt_err); - auto w_trk = 1. / (track.ptError() * track.ptError()); - auto diff_pt_sigmas = diff_pt / pt_err; - auto e_over_h = (t.raw_em_pt() / ((t.raw_pt() - t.raw_em_pt()) != 0. ? 
(t.raw_pt() - t.raw_em_pt()) : 1.)); - LogDebug("TrackstersMergeProducer") - << "trackster_pt: " << t.raw_pt() << std::endl - << "track pt (inner): " << track.pt() << std::endl - << "track eta (inner): " << track.eta() << std::endl - << "track _phi (inner): " << track.phi() << std::endl - << "track pt (outer): " << std::sqrt(track.outerMomentum().perp2()) << std::endl - << "track eta (outer): " << track.outerMomentum().eta() << std::endl - << "track _phi (outer): " << track.outerMomentum().phi() << std::endl - << "pt_err_track: " << track.ptError() << std::endl - << "diff_pt: " << diff_pt << std::endl - << "pt_err: " << pt_err << std::endl - << "diff_pt_sigmas: " << diff_pt_sigmas << std::endl - << "w_cal: " << w_cal << std::endl - << "w_trk: " << w_trk << std::endl - << "average_pt: " << (t.raw_pt() * w_cal + trk_pt * w_trk) / (w_cal + w_trk) << std::endl - << "e_over_h: " << e_over_h << std::endl; - - // If the energy is unbalanced and higher in Calo ==> balance it by - // emitting gammas/neutrals - if (diff_pt_sigmas > pt_sigma_high_) { - if (e_over_h > e_over_h_threshold_) { - auto gamma_pt = std::min(diff_pt, t.raw_em_pt()); - // Create gamma with calo direction - LogDebug("TrackstersMergeProducer") - << " Creating a photon from TRK Trackster with energy " << gamma_pt << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - diff_pt -= gamma_pt; - if (diff_pt > pt_neutral_threshold_) { - // Create also a neutral on top, with calo direction and diff_pt as energy - LogDebug("TrackstersMergeProducer") - << " Adding also a neutral hadron from TRK Trackster with energy " << diff_pt << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - } - } else { - // Create neutral with calo direction - LogDebug("TrackstersMergeProducer") - << " Creating a neutral hadron from TRK Trackster with energy " << diff_pt << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - } - } - // If the energy is in the correct ball-park (this also covers the - // previous case after the neutral emission), use weighted averages - if (diff_pt_sigmas > -pt_sigma_low_) { - // Create either an electron or a charged hadron, using the weighted - // average information between the track and the cluster for the - // energy, the direction of the track at the vertex. The PID is simply - // passed over to the final trackster, while the energy is changed. - auto average_pt = (w_cal * t.raw_pt() + trk_pt * w_trk) / (w_cal + w_trk); - LogDebug("TrackstersMergeProducer") - << " Creating electron/charged hadron from TRK Trackster with weighted p_t " << average_pt - << " and direction " << track.eta() << ", " << track.phi() << std::endl; + for (auto const &t : trackstersTRKEM) { + indexInMergedCollTRKEM.push_back(resultTrackstersMerged->size()); + seedToTracksterAssociator[t.seedIndex()].emplace_back(resultTrackstersMerged->size(), TracksterIterIndex::TRKEM); + resultTrackstersMerged->push_back(t); + iterMergedTracksters.push_back(TracksterIterIndex::TRKEM); + } - } - // If the energy of the calo is too low, just use track-only information - else { - // Create either an electron or a charged hadron, using the track - // information only. 
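Note for reviewers: the compatibility test performed by the optimiseAcrossTracksters block being removed here (whose resolution parameters survive as the new resol_calo_offset_had/_em and resol_calo_scale_had/_em) boils down to parametrising the calorimetric pT uncertainty as offset + scale * pT(track), expressing the trackster-track pT difference in units of that uncertainty, and, when compatible, combining the two measurements with inverse-variance weights. A standalone sketch, using the fillDescriptions defaults purely for illustration:

#include <cmath>
#include <cstdio>

// Sketch of the track/calo pT comparison from the block above; the numbers are
// the cfg defaults (resol_calo_offset_had = 1.5, resol_calo_scale_had = 0.15),
// not a physics statement.
int main() {
  const float calo_pt = 42.f;     // trackster raw_pt()
  const float trk_pt = 38.f;      // seeding track pt()
  const float trk_pt_err = 1.2f;  // track ptError()

  const float pt_err = trk_pt * 0.15f + 1.5f;  // calo resolution model
  const float diff_pt_sigmas = (calo_pt - trk_pt) / pt_err;

  // Inverse-variance weighted average of the two pT estimates.
  const float w_cal = 1.f / (pt_err * pt_err);
  const float w_trk = 1.f / (trk_pt_err * trk_pt_err);
  const float average_pt = (w_cal * calo_pt + w_trk * trk_pt) / (w_cal + w_trk);

  std::printf("diff = %+.2f sigma, weighted pT = %.2f GeV\n", diff_pt_sigmas, average_pt);
  return 0;
}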
- LogDebug("TrackstersMergeProducer") - << " Creating electron/charged hadron from TRK Trackster with track p_t " << trk_pt << " and direction " - << track.eta() << ", " << track.phi() << std::endl; - } - result->push_back(t); - usedTrackstersTRK[tracksterTRK_idx] = true; - tracksterTRK_idx++; - } + for (auto const &t : trackstersEM) { + indexInMergedCollEM.push_back(resultTrackstersMerged->size()); + resultTrackstersMerged->push_back(t); + iterMergedTracksters.push_back(TracksterIterIndex::EM); } - tracksterTRK_idx = 0; for (auto const &t : trackstersTRK) { - if (debug_) { - LogDebug("TrackstersMergeProducer") << " Considering trackster " << tracksterTRK_idx - << " as used: " << usedTrackstersTRK[tracksterTRK_idx] << std::endl; - } - if (!usedTrackstersTRK[tracksterTRK_idx]) { - LogDebug("TrackstersMergeProducer") - << " Creating a charge hadron from TRK Trackster with track energy " << t.raw_energy() << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - result->push_back(t); - } - tracksterTRK_idx++; + indexInMergedCollTRK.push_back(resultTrackstersMerged->size()); + seedToTracksterAssociator[t.seedIndex()].emplace_back(resultTrackstersMerged->size(), TracksterIterIndex::TRK); + resultTrackstersMerged->push_back(t); + iterMergedTracksters.push_back(TracksterIterIndex::TRK); } - auto tracksterEM_idx = 0; - for (auto const &t : trackstersEM) { - if (debug_) { - LogDebug("TrackstersMergeProducer") << " Considering trackster " << tracksterEM_idx - << " as used: " << usedTrackstersEM[tracksterEM_idx] << std::endl; - } - if (!usedTrackstersEM[tracksterEM_idx]) { - LogDebug("TrackstersMergeProducer") - << " Creating a photon from EM Trackster with track energy " << t.raw_energy() << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - result->push_back(t); - } - tracksterEM_idx++; + for (auto const &t : trackstersHAD) { + indexInMergedCollHAD.push_back(resultTrackstersMerged->size()); + resultTrackstersMerged->push_back(t); + iterMergedTracksters.push_back(TracksterIterIndex::HAD); } - tracksterHAD_idx = 0; - for (auto const &t : trackstersHAD) { - if (debug_) { - LogDebug("TrackstersMergeProducer") << " Considering trackster " << tracksterHAD_idx - << " as used: " << usedTrackstersHAD[tracksterHAD_idx] << std::endl; + assignPCAtoTracksters(*resultTrackstersMerged, layerClusters, rhtools_.getPositionLayer(rhtools_.lastLayerEE()).z()); + energyRegressionAndID(layerClusters, *resultTrackstersMerged); + + printTrackstersDebug(*resultTrackstersMerged, "TrackstersMergeProducer"); + + auto trackstersMergedHandle = evt.put(std::move(resultTrackstersMerged)); + + // TICL Candidate creation + // We start from neutrals first + + // Photons + for (unsigned i = 0; i < trackstersEM.size(); ++i) { + auto mergedIdx = indexInMergedCollEM[i]; + usedTrackstersMerged[mergedIdx] = true; + const auto &t = trackstersEM[i]; //trackster + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, mergedIdx)); + tmpCandidate.setCharge(0); + tmpCandidate.setPdgId(22); + tmpCandidate.setRawEnergy(t.raw_energy()); + math::XYZTLorentzVector p4(t.raw_energy() * t.barycenter().unit().x(), + t.raw_energy() * t.barycenter().unit().y(), + t.raw_energy() * t.barycenter().unit().z(), + t.raw_energy()); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + } + + // Neutral Hadrons + constexpr float mpion2 = 0.13957f * 0.13957f; + for (unsigned i = 0; i < trackstersHAD.size(); ++i) { + auto 
mergedIdx = indexInMergedCollHAD[i]; + usedTrackstersMerged[mergedIdx] = true; + const auto &t = trackstersHAD[i]; //trackster + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, mergedIdx)); + tmpCandidate.setCharge(0); + tmpCandidate.setPdgId(130); + tmpCandidate.setRawEnergy(t.raw_energy()); + float momentum = std::sqrt(t.raw_energy() * t.raw_energy() - mpion2); + math::XYZTLorentzVector p4(momentum * t.barycenter().unit().x(), + momentum * t.barycenter().unit().y(), + momentum * t.barycenter().unit().z(), + t.raw_energy()); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + } + + // Charged Particles + for (unsigned i = 0; i < trackstersTRKEM.size(); ++i) { + auto mergedIdx = indexInMergedCollTRKEM[i]; + if (!usedTrackstersMerged[mergedIdx]) { + const auto &t = trackstersTRKEM[i]; //trackster + auto trackIdx = t.seedIndex(); + auto const &track = tracks[trackIdx]; + if (!usedSeeds[trackIdx] and t.raw_energy() > 0) { + usedSeeds[trackIdx] = true; + usedTrackstersMerged[mergedIdx] = true; + + std::vector trackstersTRKwithSameSeed; + std::vector trackstersTRKEMwithSameSeed; + + for (const auto &tracksterIterationPair : seedToTracksterAssociator[trackIdx]) { + if (tracksterIterationPair.first != mergedIdx and !usedTrackstersMerged[tracksterIterationPair.first] and + trackstersMergedHandle->at(tracksterIterationPair.first).raw_energy() > 0.) { + if (tracksterIterationPair.second == TracksterIterIndex::TRKEM) { + trackstersTRKEMwithSameSeed.push_back(tracksterIterationPair.first); + } else if (tracksterIterationPair.second == TracksterIterIndex::TRK) { + trackstersTRKwithSameSeed.push_back(tracksterIterationPair.first); + } + } + } + + float tracksterTotalRawPt = t.raw_pt(); + std::vector haloTrackstersTRKIdx; + bool foundCompatibleTRK = false; + + for (auto otherTracksterIdx : trackstersTRKwithSameSeed) { + usedTrackstersMerged[otherTracksterIdx] = true; + tracksterTotalRawPt += trackstersMergedHandle->at(otherTracksterIdx).raw_pt(); + + // Check the X,Y,Z barycenter and merge if they are very close (halo) + if ((t.barycenter() - trackstersMergedHandle->at(otherTracksterIdx).barycenter()).mag2() < + halo_max_distance2_) { + haloTrackstersTRKIdx.push_back(otherTracksterIdx); + + } else { + foundCompatibleTRK = true; + } + } + + //check if there is 1-to-1 relationship + if (trackstersTRKEMwithSameSeed.empty()) { + if (foundCompatibleTRK) { + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, mergedIdx)); + double raw_energy = t.raw_energy(); + + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, trackIdx)); + tmpCandidate.setPdgId(211 * track.charge()); + for (auto otherTracksterIdx : trackstersTRKwithSameSeed) { + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, otherTracksterIdx)); + raw_energy += trackstersMergedHandle->at(otherTracksterIdx).raw_energy(); + } + tmpCandidate.setRawEnergy(raw_energy); + math::XYZTLorentzVector p4(raw_energy * track.momentum().unit().x(), + raw_energy * track.momentum().unit().y(), + raw_energy * track.momentum().unit().z(), + raw_energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + + } else { + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, mergedIdx)); + double raw_energy = t.raw_energy(); + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, trackIdx)); + for (auto otherTracksterIdx : trackstersTRKwithSameSeed) { 
+ tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, otherTracksterIdx)); + raw_energy += trackstersMergedHandle->at(otherTracksterIdx).raw_energy(); + } + tmpCandidate.setPdgId(11 * track.charge()); + + tmpCandidate.setRawEnergy(raw_energy); + math::XYZTLorentzVector p4(raw_energy * track.momentum().unit().x(), + raw_energy * track.momentum().unit().y(), + raw_energy * track.momentum().unit().z(), + raw_energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + } + + } else { + // if 1-to-many find closest trackster in momentum + int closestTrackster = mergedIdx; + float minPtDiff = std::abs(t.raw_pt() - track.pt()); + for (auto otherTracksterIdx : trackstersTRKEMwithSameSeed) { + auto thisPt = tracksterTotalRawPt + trackstersMergedHandle->at(otherTracksterIdx).raw_pt() - t.raw_pt(); + closestTrackster = std::abs(thisPt - track.pt()) < minPtDiff ? otherTracksterIdx : closestTrackster; + } + usedTrackstersMerged[closestTrackster] = true; + + if (foundCompatibleTRK) { + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, closestTrackster)); + double raw_energy = trackstersMergedHandle->at(closestTrackster).raw_energy(); + + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, trackIdx)); + tmpCandidate.setPdgId(211 * track.charge()); + for (auto otherTracksterIdx : trackstersTRKwithSameSeed) { + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, otherTracksterIdx)); + raw_energy += trackstersMergedHandle->at(otherTracksterIdx).raw_energy(); + } + tmpCandidate.setRawEnergy(raw_energy); + float momentum = std::sqrt(raw_energy * raw_energy - mpion2); + math::XYZTLorentzVector p4(momentum * track.momentum().unit().x(), + momentum * track.momentum().unit().y(), + momentum * track.momentum().unit().z(), + raw_energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + + } else { + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, closestTrackster)); + double raw_energy = trackstersMergedHandle->at(closestTrackster).raw_energy(); + + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, trackIdx)); + for (auto otherTracksterIdx : trackstersTRKwithSameSeed) { + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, otherTracksterIdx)); + raw_energy += trackstersMergedHandle->at(otherTracksterIdx).raw_energy(); + } + tmpCandidate.setPdgId(11 * track.charge()); + tmpCandidate.setRawEnergy(raw_energy); + math::XYZTLorentzVector p4(raw_energy * track.momentum().unit().x(), + raw_energy * track.momentum().unit().y(), + raw_energy * track.momentum().unit().z(), + raw_energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + } + // Promote all other TRKEM tracksters as photons with their energy. + for (auto otherTracksterIdx : trackstersTRKEMwithSameSeed) { + auto tmpIndex = (otherTracksterIdx != closestTrackster) ? 
otherTracksterIdx : mergedIdx; + TICLCandidate photonCandidate; + const auto &otherTrackster = trackstersMergedHandle->at(tmpIndex); + auto gammaEnergy = otherTrackster.raw_energy(); + photonCandidate.setCharge(0); + photonCandidate.setPdgId(22); + photonCandidate.setRawEnergy(gammaEnergy); + math::XYZTLorentzVector gammaP4(gammaEnergy * otherTrackster.barycenter().unit().x(), + gammaEnergy * otherTrackster.barycenter().unit().y(), + gammaEnergy * otherTrackster.barycenter().unit().z(), + gammaEnergy); + photonCandidate.setP4(gammaP4); + photonCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, tmpIndex)); + resultCandidates->push_back(photonCandidate); + } + } + } } - if (!usedTrackstersHAD[tracksterHAD_idx]) { - LogDebug("TrackstersMergeProducer") - << " Creating a neutral hadron from HAD Trackster with track energy " << t.raw_energy() << " and direction " - << t.eigenvectors(0).eta() << ", " << t.eigenvectors(0).phi() << std::endl; - result->push_back(t); + } //end of loop over trackstersTRKEM + + for (unsigned i = 0; i < trackstersTRK.size(); ++i) { + auto mergedIdx = indexInMergedCollTRK[i]; + const auto &t = trackstersTRK[i]; //trackster + + if (!usedTrackstersMerged[mergedIdx] and t.raw_energy() > 0) { + auto trackIdx = t.seedIndex(); + auto const &track = tracks[trackIdx]; + if (!usedSeeds[trackIdx]) { + usedSeeds[trackIdx] = true; + usedTrackstersMerged[mergedIdx] = true; + TICLCandidate tmpCandidate; + tmpCandidate.addTrackster(edm::Ptr(trackstersMergedHandle, mergedIdx)); + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, trackIdx)); + tmpCandidate.setPdgId(211 * track.charge()); + tmpCandidate.setRawEnergy(t.raw_energy()); + float momentum = std::sqrt(t.raw_energy() * t.raw_energy() - mpion2); + math::XYZTLorentzVector p4(momentum * track.momentum().unit().x(), + momentum * track.momentum().unit().y(), + momentum * track.momentum().unit().z(), + t.raw_energy()); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + } + } + } + // For all seeds that have 0-energy tracksters whose track is not marked as used, create a charged hadron with the track information. 
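Note for reviewers: all TICLCandidate four-vectors assembled in this produce() method follow one recipe: take the trackster raw_energy() as E, pick a direction (barycenter for neutrals, track momentum for charged candidates), and set |p| = E for the photon hypothesis or |p| = sqrt(E^2 - m_pi^2) for the 130/211 hypotheses. A minimal standalone helper reproducing just that arithmetic (names and values are illustrative only):

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdio>

constexpr float kPionMass = 0.13957f;  // GeV, matches mpion2 above

// Build (px, py, pz, E) from an energy, a unit direction and a mass hypothesis.
std::array<float, 4> makeP4(float energy, const std::array<float, 3>& dir, float mass) {
  const float p = std::sqrt(std::max(energy * energy - mass * mass, 0.f));
  return {p * dir[0], p * dir[1], p * dir[2], energy};
}

int main() {
  // Unit vector standing in for the trackster barycenter direction.
  const std::array<float, 3> dir{0.3f, 0.4f, std::sqrt(0.75f)};
  const auto photon = makeP4(25.f, dir, 0.f);            // pdgId 22: |p| = E
  const auto neutralHad = makeP4(25.f, dir, kPionMass);  // pdgId 130: pion mass hypothesis
  std::printf("photon       E=%.2f pz=%.3f\n", photon[3], photon[2]);
  std::printf("neutral had. E=%.2f pz=%.3f\n", neutralHad[3], neutralHad[2]);
  return 0;
}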
+ for (auto const &s : seedingTrk) { + if (usedSeeds[s.index] == false) { + auto const &track = tracks[s.index]; + // emit a charged hadron + TICLCandidate tmpCandidate; + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, s.index)); + tmpCandidate.setPdgId(211 * track.charge()); + float energy = std::sqrt(track.pt() * track.pt() + mpion2); + tmpCandidate.setRawEnergy(energy); + math::XYZTLorentzVector p4(track.momentum().x(), track.momentum().y(), track.momentum().z(), energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + usedSeeds[s.index] = true; } - tracksterHAD_idx++; } - assignPCAtoTracksters(*result, layerClusters, rhtools_.getPositionLayer(rhtools_.lastLayerEE()).z()); - energyRegressionAndID(layerClusters, *result); - - printTrackstersDebug(*result, "TrackstersMergeProducer"); + // for all general tracks (high purity, pt > 1), check if they have been used: if not, promote them as charged hadrons + for (unsigned i = 0; i < tracks.size(); ++i) { + auto const &track = tracks[i]; + if (track.pt() > track_min_pt_ and track.quality(reco::TrackBase::highPurity) and + track.hitPattern().numberOfLostHits(reco::HitPattern::MISSING_OUTER_HITS) < track_max_missing_outerhits_ and + std::abs(track.outerEta()) > track_min_eta_ and std::abs(track.outerEta()) < track_max_eta_ and + usedSeeds[i] == false) { + // emit a charged hadron + TICLCandidate tmpCandidate; + tmpCandidate.setCharge(track.charge()); + tmpCandidate.setTrackPtr(edm::Ptr(track_h, i)); + tmpCandidate.setPdgId(211 * track.charge()); + float energy = std::sqrt(track.pt() * track.pt() + mpion2); + tmpCandidate.setRawEnergy(energy); + math::XYZTLorentzVector p4(track.momentum().x(), track.momentum().y(), track.momentum().z(), energy); + tmpCandidate.setP4(p4); + resultCandidates->push_back(tmpCandidate); + usedSeeds[i] = true; + } + } - evt.put(std::move(result)); + evt.put(std::move(resultCandidates)); } void TrackstersMergeProducer::energyRegressionAndID(const std::vector &layerClusters, @@ -534,6 +720,7 @@ void TrackstersMergeProducer::printTrackstersDebug(const std::vector void TrackstersMergeProducer::fillDescriptions(edm::ConfigurationDescriptions &descriptions) { edm::ParameterSetDescription desc; + desc.add("tracksterstrkem", edm::InputTag("ticlTrackstersTrkEM")); desc.add("trackstersem", edm::InputTag("ticlTrackstersEM")); desc.add("tracksterstrk", edm::InputTag("ticlTrackstersTrk")); desc.add("trackstershad", edm::InputTag("ticlTrackstersHAD")); @@ -545,11 +732,18 @@ void TrackstersMergeProducer::fillDescriptions(edm::ConfigurationDescriptions &d desc.add("phi_bin_window", 1); desc.add("pt_sigma_high", 2.); desc.add("pt_sigma_low", 2.); + desc.add("halo_max_distance2", 4.); + desc.add("track_min_pt", 1.); + desc.add("track_min_eta", 1.48); + desc.add("track_max_eta", 3.); + desc.add("track_max_missing_outerhits", 5); desc.add("cosangle_align", 0.9945); desc.add("e_over_h_threshold", 1.); desc.add("pt_neutral_threshold", 2.); - desc.add("resol_calo_offset", 1.5); - desc.add("resol_calo_scale", 0.15); + desc.add("resol_calo_offset_had", 1.5); + desc.add("resol_calo_scale_had", 0.15); + desc.add("resol_calo_offset_em", 1.5); + desc.add("resol_calo_scale_em", 0.15); desc.add("debug", true); desc.add("eid_graph_path", "RecoHGCal/TICL/data/tf_models/energy_id_v0.pb"); desc.add("eid_input_name", "input"); diff --git a/RecoHGCal/TICL/plugins/TrackstersProducer.cc b/RecoHGCal/TICL/plugins/TrackstersProducer.cc index d9d485d216404..38650eeab1211 100644 --- 
a/RecoHGCal/TICL/plugins/TrackstersProducer.cc +++ b/RecoHGCal/TICL/plugins/TrackstersProducer.cc @@ -42,16 +42,18 @@ class TrackstersProducer : public edm::stream::EDProducer myAlgo_; + std::string detector_; + bool doNose_; + std::unique_ptr> myAlgo_; + std::unique_ptr> myAlgoHFNose_; const edm::EDGetTokenT> clusters_token_; const edm::EDGetTokenT> filtered_layerclusters_mask_token_; const edm::EDGetTokenT> original_layerclusters_mask_token_; const edm::EDGetTokenT>> clustersTime_token_; - const edm::EDGetTokenT layer_clusters_tiles_token_; + edm::EDGetTokenT layer_clusters_tiles_token_; + edm::EDGetTokenT layer_clusters_tiles_hfnose_token_; const edm::EDGetTokenT> seeding_regions_token_; - const std::vector filter_on_categories_; - const double pid_threshold_; const std::string itername_; }; DEFINE_FWK_MODULE(TrackstersProducer); @@ -76,18 +78,24 @@ void TrackstersProducer::globalEndJob(TrackstersCache* cache) { } TrackstersProducer::TrackstersProducer(const edm::ParameterSet& ps, const TrackstersCache* cache) - : myAlgo_(std::make_unique(ps, cache)), + : detector_(ps.getParameter("detector")), + doNose_(detector_ == "HFNose"), + myAlgo_(std::make_unique>(ps, cache)), + myAlgoHFNose_(std::make_unique>(ps, cache)), clusters_token_(consumes>(ps.getParameter("layer_clusters"))), filtered_layerclusters_mask_token_(consumes>(ps.getParameter("filtered_mask"))), original_layerclusters_mask_token_(consumes>(ps.getParameter("original_mask"))), clustersTime_token_( consumes>>(ps.getParameter("time_layerclusters"))), - layer_clusters_tiles_token_(consumes(ps.getParameter("layer_clusters_tiles"))), seeding_regions_token_( consumes>(ps.getParameter("seeding_regions"))), - filter_on_categories_(ps.getParameter>("filter_on_categories")), - pid_threshold_(ps.getParameter("pid_threshold")), itername_(ps.getParameter("itername")) { + if (doNose_) { + layer_clusters_tiles_hfnose_token_ = + consumes(ps.getParameter("layer_clusters_hfnose_tiles")); + } else { + layer_clusters_tiles_token_ = consumes(ps.getParameter("layer_clusters_tiles")); + } produces>(); produces>(); // Mask to be applied at the next iteration } @@ -95,20 +103,27 @@ TrackstersProducer::TrackstersProducer(const edm::ParameterSet& ps, const Tracks void TrackstersProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) { // hgcalMultiClusters edm::ParameterSetDescription desc; + desc.add("detector", "HGCAL"); desc.add("layer_clusters", edm::InputTag("hgcalLayerClusters")); desc.add("filtered_mask", edm::InputTag("filteredLayerClusters", "iterationLabelGoesHere")); desc.add("original_mask", edm::InputTag("hgcalLayerClusters", "InitialLayerClustersMask")); desc.add("time_layerclusters", edm::InputTag("hgcalLayerClusters", "timeLayerCluster")); desc.add("layer_clusters_tiles", edm::InputTag("ticlLayerTileProducer")); + desc.add("layer_clusters_hfnose_tiles", edm::InputTag("ticlLayerTileHFNose")); desc.add("seeding_regions", edm::InputTag("ticlSeedingRegionProducer")); desc.add>("filter_on_categories", {0}); - desc.add("pid_threshold", 0.); // make default such that no filtering is applied + desc.add("pid_threshold", 0.); // make default such that no filtering is applied + desc.add("energy_em_over_total_threshold", -1.); // make default such that no filtering is applied + desc.add("max_longitudinal_sigmaPCA", 9999); + desc.add("shower_start_max_layer", 9999); // make default such that no filtering is applied desc.add("algo_verbosity", 0); desc.add("min_cos_theta", 0.915); + desc.add("root_doublet_max_distance_from_seed_squared", 
9999); desc.add("min_cos_pointing", -1.); - desc.add("missing_layers", 0); + desc.add("skip_layers", 0); + desc.add("max_missing_layers_in_trackster", 9999); desc.add("etaLimitIncreaseWindow", 2.1); - desc.add("min_clusters_per_ntuplet", 10); + desc.add("min_layers_per_trackster", 10); desc.add("max_delta_time", 3.); //nsigma desc.add("out_in_dfs", true); desc.add("max_out_in_hops", 10); @@ -129,27 +144,11 @@ void TrackstersProducer::produce(edm::Event& evt, const edm::EventSetup& es) { auto result = std::make_unique>(); auto output_mask = std::make_unique>(); - edm::Handle> cluster_h; - edm::Handle> filtered_layerclusters_mask_h; - edm::Handle> original_layerclusters_mask_h; - edm::Handle>> time_clusters_h; - edm::Handle layer_clusters_tiles_h; - edm::Handle> seeding_regions_h; - - evt.getByToken(clusters_token_, cluster_h); - evt.getByToken(filtered_layerclusters_mask_token_, filtered_layerclusters_mask_h); - evt.getByToken(original_layerclusters_mask_token_, original_layerclusters_mask_h); - evt.getByToken(clustersTime_token_, time_clusters_h); - evt.getByToken(layer_clusters_tiles_token_, layer_clusters_tiles_h); - evt.getByToken(seeding_regions_token_, seeding_regions_h); - - const auto& layerClusters = *cluster_h; - const auto& inputClusterMask = *filtered_layerclusters_mask_h; - const auto& layerClustersTimes = *time_clusters_h; - const auto& layer_clusters_tiles = *layer_clusters_tiles_h; - const auto& seeding_regions = *seeding_regions_h; - const ticl::PatternRecognitionAlgoBase::Inputs input( - evt, es, layerClusters, inputClusterMask, layerClustersTimes, layer_clusters_tiles, seeding_regions); + const std::vector& original_layerclusters_mask = evt.get(original_layerclusters_mask_token_); + const auto& layerClusters = evt.get(clusters_token_); + const auto& inputClusterMask = evt.get(filtered_layerclusters_mask_token_); + const auto& layerClustersTimes = evt.get(clustersTime_token_); + const auto& seeding_regions = evt.get(seeding_regions_token_); std::unordered_map> seedToTrackstersAssociation; // if it's regional iteration and there are seeding regions @@ -159,31 +158,26 @@ void TrackstersProducer::produce(edm::Event& evt, const edm::EventSetup& es) { seedToTrackstersAssociation.emplace(seeding_regions[i].index, 0); } } - myAlgo_->makeTracksters(input, *result, seedToTrackstersAssociation); + if (doNose_) { + const auto& layer_clusters_hfnose_tiles = evt.get(layer_clusters_tiles_hfnose_token_); + const typename PatternRecognitionAlgoBaseT::Inputs inputHFNose( + evt, es, layerClusters, inputClusterMask, layerClustersTimes, layer_clusters_hfnose_tiles, seeding_regions); + + myAlgoHFNose_->makeTracksters(inputHFNose, *result, seedToTrackstersAssociation); + + } else { + const auto& layer_clusters_tiles = evt.get(layer_clusters_tiles_token_); + const typename PatternRecognitionAlgoBaseT::Inputs input( + evt, es, layerClusters, inputClusterMask, layerClustersTimes, layer_clusters_tiles, seeding_regions); + + myAlgo_->makeTracksters(input, *result, seedToTrackstersAssociation); + } // Now update the global mask and put it into the event - output_mask->reserve(original_layerclusters_mask_h->size()); + output_mask->reserve(original_layerclusters_mask.size()); // Copy over the previous state - std::copy(std::begin(*original_layerclusters_mask_h), - std::end(*original_layerclusters_mask_h), - std::back_inserter(*output_mask)); - - // Filter results based on PID criteria. 
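Note for reviewers: the filter deleted in this hunk (its selection now lives inside the pattern-recognition step through pid_threshold and filter_on_categories) keeps tracksters whose probabilities, summed over the selected categories, exceed the threshold, so the remove_if predicate flags everything at or below it. A standalone sketch of that selection with an invented eight-category probability array:

#include <algorithm>
#include <array>
#include <cstdio>
#include <vector>

// Stand-in for Trackster::id_probabilities(); eight PID categories assumed
// purely for illustration.
struct TracksterLite {
  std::array<float, 8> idProb{};
};

// Keep tracksters whose cumulative probability over 'categories' is above
// 'threshold'; remove_if therefore discards everything at or below it.
void filterOnPid(std::vector<TracksterLite>& tracksters,
                 const std::vector<int>& categories,
                 double threshold) {
  auto belowThreshold = [&](const TracksterLite& t) {
    double cumulative = 0.;
    for (int idx : categories)
      cumulative += t.idProb[idx];
    return cumulative <= threshold;
  };
  tracksters.erase(std::remove_if(tracksters.begin(), tracksters.end(), belowThreshold),
                   tracksters.end());
}

int main() {
  std::vector<TracksterLite> ts(2);
  ts[0].idProb = {0.6f, 0.3f, 0.1f, 0.f, 0.f, 0.f, 0.f, 0.f};  // EM-like
  ts[1].idProb = {0.1f, 0.1f, 0.2f, 0.f, 0.f, 0.6f, 0.f, 0.f};  // hadron-like
  filterOnPid(ts, {0, 1}, 0.5);  // keep EM-like tracksters only
  std::printf("%zu trackster(s) kept\n", ts.size());
  return 0;
}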
- // We want to **keep** tracksters whose cumulative - // probability summed up over the selected categories - // is greater than the chosen threshold. Therefore - // the filtering function should **discard** all - // tracksters **below** the threshold. - auto filter_on_pids = [&](Trackster& t) -> bool { - auto cumulative_prob = 0.; - for (auto index : filter_on_categories_) { - cumulative_prob += t.id_probabilities(index); - } - return cumulative_prob <= pid_threshold_; - }; - - // Actually filter results and shrink size to fit - result->erase(std::remove_if(result->begin(), result->end(), filter_on_pids), result->end()); + std::copy( + std::begin(original_layerclusters_mask), std::end(original_layerclusters_mask), std::back_inserter(*output_mask)); // Mask the used elements, accordingly for (auto const& trackster : *result) { diff --git a/RecoHGCal/TICL/plugins/filters.cc b/RecoHGCal/TICL/plugins/filters.cc index edbd834f4df90..54a89147dfb1d 100644 --- a/RecoHGCal/TICL/plugins/filters.cc +++ b/RecoHGCal/TICL/plugins/filters.cc @@ -7,9 +7,13 @@ #include "ClusterFilterByAlgo.h" #include "ClusterFilterByAlgoAndSize.h" #include "ClusterFilterBySize.h" +#include "ClusterFilterByAlgoAndSizeAndLayerRange.h" using namespace ticl; DEFINE_EDM_PLUGIN(ClusterFilterFactory, ClusterFilterByAlgo, "ClusterFilterByAlgo"); DEFINE_EDM_PLUGIN(ClusterFilterFactory, ClusterFilterByAlgoAndSize, "ClusterFilterByAlgoAndSize"); DEFINE_EDM_PLUGIN(ClusterFilterFactory, ClusterFilterBySize, "ClusterFilterBySize"); +DEFINE_EDM_PLUGIN(ClusterFilterFactory, + ClusterFilterByAlgoAndSizeAndLayerRange, + "ClusterFilterByAlgoAndSizeAndLayerRange"); diff --git a/RecoHGCal/TICL/python/EMStep_cff.py b/RecoHGCal/TICL/python/EMStep_cff.py index 7bc07499f3b80..0aa6f9ca50fd9 100644 --- a/RecoHGCal/TICL/python/EMStep_cff.py +++ b/RecoHGCal/TICL/python/EMStep_cff.py @@ -1,7 +1,6 @@ import FWCore.ParameterSet.Config as cms -from RecoHGCal.TICL.TICLSeedingRegions_cff import ticlSeedingGlobal -from RecoHGCal.TICL.ticlLayerTileProducer_cfi import ticlLayerTileProducer as _ticlLayerTileProducer +from RecoHGCal.TICL.TICLSeedingRegions_cff import ticlSeedingGlobal, ticlSeedingGlobalHFNose from RecoHGCal.TICL.trackstersProducer_cfi import trackstersProducer as _trackstersProducer from RecoHGCal.TICL.filteredLayerClustersProducer_cfi import filteredLayerClustersProducer as _filteredLayerClustersProducer from RecoHGCal.TICL.multiClustersFromTrackstersProducer_cfi import multiClustersFromTrackstersProducer as _multiClustersFromTrackstersProducer @@ -9,25 +8,30 @@ # CLUSTER FILTERING/MASKING filteredLayerClustersEM = _filteredLayerClustersProducer.clone( - clusterFilter = "ClusterFilterByAlgoAndSize", - min_cluster_size = 2, # inclusive + clusterFilter = "ClusterFilterByAlgoAndSizeAndLayerRange", + min_cluster_size = 3, # inclusive + max_layerId = 30, # inclusive algo_number = 8, - LayerClustersInputMask = 'ticlTrackstersTrk', + LayerClustersInputMask = 'ticlTrackstersTrkEM', iteration_label = "EM" ) # CA - PATTERN RECOGNITION ticlTrackstersEM = _trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersEM", "EM"), - original_mask = 'ticlTrackstersTrk', + filtered_mask = "filteredLayerClustersEM:EM", + original_mask = 'ticlTrackstersTrkEM', seeding_regions = "ticlSeedingGlobal", filter_on_categories = [0, 1], - pid_threshold = 0.8, - max_out_in_hops = 4, - missing_layers = 1, - min_clusters_per_ntuplet = 10, - min_cos_theta = 0.978, # ~12 degrees + pid_threshold = 0.5, + energy_em_over_total_threshold = 0.9, + 
max_longitudinal_sigmaPCA = 10, + shower_start_max_layer = 5, #inclusive + max_out_in_hops = 1, + skip_layers = 2, + max_missing_layers_in_trackster = 1, + min_layers_per_trackster = 10, + min_cos_theta = 0.97, # ~14 degrees min_cos_pointing = 0.9, # ~25 degrees max_delta_time = 3., itername = "EM", @@ -45,3 +49,26 @@ ,ticlTrackstersEM ,ticlMultiClustersFromTrackstersEM) +filteredLayerClustersHFNoseEM = filteredLayerClustersEM.clone( + LayerClusters = 'hgcalLayerClustersHFNose', + LayerClustersInputMask = "hgcalLayerClustersHFNose:InitialLayerClustersMask", + iteration_label = "EMn", + algo_number = 9 +#no tracking mask for EM for now +) + +ticlTrackstersHFNoseEM = ticlTrackstersEM.clone( + detector = "HFNose", + layer_clusters = "hgcalLayerClustersHFNose", + layer_clusters_hfnose_tiles = "ticlLayerTileHFNose", + original_mask = "hgcalLayerClustersHFNose:InitialLayerClustersMask", + filtered_mask = "filteredLayerClustersHFNoseEM:EMn", + seeding_regions = "ticlSeedingGlobalHFNose", + time_layerclusters = "hgcalLayerClustersHFNose:timeLayerCluster", + min_layers_per_trackster = 6 +) + +ticlHFNoseEMStepTask = cms.Task(ticlSeedingGlobalHFNose + ,filteredLayerClustersHFNoseEM + ,ticlTrackstersHFNoseEM +) diff --git a/RecoHGCal/TICL/python/HADStep_cff.py b/RecoHGCal/TICL/python/HADStep_cff.py index 9f4b726250c03..72efaebc201ec 100644 --- a/RecoHGCal/TICL/python/HADStep_cff.py +++ b/RecoHGCal/TICL/python/HADStep_cff.py @@ -10,23 +10,23 @@ filteredLayerClustersHAD = _filteredLayerClustersProducer.clone( clusterFilter = "ClusterFilterByAlgoAndSize", - min_cluster_size = 2, # inclusive + min_cluster_size = 3, # inclusive algo_number = 8, iteration_label = "HAD", - LayerClustersInputMask = "ticlTrackstersEM" + LayerClustersInputMask = "ticlTrackstersTrk" ) # CA - PATTERN RECOGNITION ticlTrackstersHAD = _trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersHAD", "HAD"), - original_mask = 'ticlTrackstersEM', + filtered_mask = "filteredLayerClustersHAD:HAD", + original_mask = 'ticlTrackstersTrk', seeding_regions = "ticlSeedingGlobal", # For the moment we mask everything w/o requirements since we are last # filter_on_categories = [5], # filter neutral hadrons # pid_threshold = 0.7, - missing_layers = 1, - min_clusters_per_ntuplet = 12, + skip_layers = 1, + min_layers_per_trackster = 12, min_cos_theta = 0.866, # ~30 degrees min_cos_pointing = 0.819, # ~35 degrees max_delta_time = -1, diff --git a/RecoHGCal/TICL/python/MIPStep_cff.py b/RecoHGCal/TICL/python/MIPStep_cff.py index 8e1c5881986e8..f7e5234b3aa9c 100644 --- a/RecoHGCal/TICL/python/MIPStep_cff.py +++ b/RecoHGCal/TICL/python/MIPStep_cff.py @@ -1,7 +1,6 @@ import FWCore.ParameterSet.Config as cms -from RecoHGCal.TICL.TICLSeedingRegions_cff import ticlSeedingGlobal -from RecoHGCal.TICL.ticlLayerTileProducer_cfi import ticlLayerTileProducer as _ticlLayerTileProducer +from RecoHGCal.TICL.TICLSeedingRegions_cff import ticlSeedingGlobal, ticlSeedingGlobalHFNose from RecoHGCal.TICL.trackstersProducer_cfi import trackstersProducer as _trackstersProducer from RecoHGCal.TICL.filteredLayerClustersProducer_cfi import filteredLayerClustersProducer as _filteredLayerClustersProducer from RecoHGCal.TICL.multiClustersFromTrackstersProducer_cfi import multiClustersFromTrackstersProducer as _multiClustersFromTrackstersProducer @@ -15,13 +14,14 @@ iteration_label = "MIP" ) + # CA - PATTERN RECOGNITION ticlTrackstersMIP = _trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersMIP", "MIP"), + filtered_mask = 
"filteredLayerClustersMIP:MIP", seeding_regions = "ticlSeedingGlobal", - missing_layers = 3, - min_clusters_per_ntuplet = 10, + skip_layers = 3, + min_layers_per_trackster = 10, min_cos_theta = 0.99, # ~10 degrees min_cos_pointing = 0.5, out_in_dfs = False, @@ -40,3 +40,25 @@ ,ticlTrackstersMIP ,ticlMultiClustersFromTrackstersMIP) +filteredLayerClustersHFNoseMIP = filteredLayerClustersMIP.clone( + LayerClusters = 'hgcalLayerClustersHFNose', + LayerClustersInputMask = "hgcalLayerClustersHFNose:InitialLayerClustersMask", + iteration_label = "MIPn", + algo_number = 9 +) + +ticlTrackstersHFNoseMIP = ticlTrackstersMIP.clone( + detector = "HFNose", + layer_clusters = "hgcalLayerClustersHFNose", + layer_clusters_hfnose_tiles = "ticlLayerTileHFNose", + original_mask = "hgcalLayerClustersHFNose:InitialLayerClustersMask", + filtered_mask = "filteredLayerClustersHFNoseMIP:MIPn", + seeding_regions = "ticlSeedingGlobalHFNose", + time_layerclusters = "hgcalLayerClustersHFNose:timeLayerCluster", + min_layers_per_trackster = 6 +) + +ticlHFNoseMIPStepTask = cms.Task(ticlSeedingGlobalHFNose + ,filteredLayerClustersHFNoseMIP + ,ticlTrackstersHFNoseMIP +) diff --git a/RecoHGCal/TICL/python/TICLSeedingRegions_cff.py b/RecoHGCal/TICL/python/TICLSeedingRegions_cff.py index 9e4698c944d02..4b76fd39d322c 100644 --- a/RecoHGCal/TICL/python/TICLSeedingRegions_cff.py +++ b/RecoHGCal/TICL/python/TICLSeedingRegions_cff.py @@ -12,3 +12,6 @@ algoId = 1 ) +ticlSeedingGlobalHFNose = _ticlSeedingRegionProducer.clone( + algoId = 2 +) diff --git a/RecoHGCal/TICL/python/TrkEMStep_cff.py b/RecoHGCal/TICL/python/TrkEMStep_cff.py new file mode 100644 index 0000000000000..458f489f696ec --- /dev/null +++ b/RecoHGCal/TICL/python/TrkEMStep_cff.py @@ -0,0 +1,51 @@ +import FWCore.ParameterSet.Config as cms + +from RecoHGCal.TICL.TICLSeedingRegions_cff import ticlSeedingTrk +from RecoHGCal.TICL.trackstersProducer_cfi import trackstersProducer as _trackstersProducer +from RecoHGCal.TICL.filteredLayerClustersProducer_cfi import filteredLayerClustersProducer as _filteredLayerClustersProducer +from RecoHGCal.TICL.multiClustersFromTrackstersProducer_cfi import multiClustersFromTrackstersProducer as _multiClustersFromTrackstersProducer + +# CLUSTER FILTERING/MASKING + +filteredLayerClustersTrkEM = _filteredLayerClustersProducer.clone( + clusterFilter = "ClusterFilterByAlgoAndSizeAndLayerRange", + min_cluster_size = 3, # inclusive + max_layerId = 30, # inclusive + algo_number = 8, + iteration_label = "TrkEM" +) + +# CA - PATTERN RECOGNITION + +ticlTrackstersTrkEM = _trackstersProducer.clone( + filtered_mask = cms.InputTag("filteredLayerClustersTrkEM", "TrkEM"), + seeding_regions = "ticlSeedingTrk", + filter_on_categories = [0, 1], + pid_threshold = 0.5, + energy_em_over_total_threshold = 0.9, + max_longitudinal_sigmaPCA = 10, + shower_start_max_layer = 5, #inclusive + max_out_in_hops = 1, + max_missing_layers_in_trackster = 2, + skip_layers = 2, + min_layers_per_trackster = 10, + min_cos_theta = 0.97, # ~14 degrees + min_cos_pointing = 0.94, # ~20 degrees + root_doublet_max_distance_from_seed_squared = 2.5e-3, # dR=0.05 + max_delta_time = 3., + itername = "TrkEM", + algo_verbosity = 0, +) + + +# MULTICLUSTERS + +ticlMultiClustersFromTrackstersTrkEM = _multiClustersFromTrackstersProducer.clone( + Tracksters = "ticlTrackstersTrkEM" +) + +ticlTrkEMStepTask = cms.Task(ticlSeedingTrk + ,filteredLayerClustersTrkEM + ,ticlTrackstersTrkEM + ,ticlMultiClustersFromTrackstersTrkEM) + diff --git a/RecoHGCal/TICL/python/TrkStep_cff.py 
b/RecoHGCal/TICL/python/TrkStep_cff.py index 686ea18b9032a..739a7cafd5d2b 100644 --- a/RecoHGCal/TICL/python/TrkStep_cff.py +++ b/RecoHGCal/TICL/python/TrkStep_cff.py @@ -9,20 +9,23 @@ # CLUSTER FILTERING/MASKING filteredLayerClustersTrk = _filteredLayerClustersProducer.clone( - clusterFilter = "ClusterFilterByAlgo", + clusterFilter = "ClusterFilterByAlgoAndSize", + min_cluster_size = 3, # inclusive algo_number = 8, + LayerClustersInputMask = 'ticlTrackstersEM', iteration_label = "Trk" ) # CA - PATTERN RECOGNITION ticlTrackstersTrk = _trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersTrk", "Trk"), + filtered_mask = "filteredLayerClustersTrk:Trk", seeding_regions = "ticlSeedingTrk", + original_mask = 'ticlTrackstersEM', filter_on_categories = [2, 4], # filter muons and charged hadrons pid_threshold = 0.0, - missing_layers = 3, - min_clusters_per_ntuplet = 10, + skip_layers = 3, + min_layers_per_trackster = 10, min_cos_theta = 0.866, # ~30 degrees min_cos_pointing = 0.798, # ~ 37 degrees max_delta_time = -1., @@ -43,3 +46,4 @@ ,ticlTrackstersTrk ,ticlMultiClustersFromTrackstersTrk) + diff --git a/RecoHGCal/TICL/python/iterativeTICL_cff.py b/RecoHGCal/TICL/python/iterativeTICL_cff.py index 18f3e67a00f4d..9af6dc68a9af6 100644 --- a/RecoHGCal/TICL/python/iterativeTICL_cff.py +++ b/RecoHGCal/TICL/python/iterativeTICL_cff.py @@ -1,16 +1,15 @@ import FWCore.ParameterSet.Config as cms from RecoHGCal.TICL.MIPStep_cff import * +from RecoHGCal.TICL.TrkEMStep_cff import * from RecoHGCal.TICL.TrkStep_cff import * from RecoHGCal.TICL.EMStep_cff import * from RecoHGCal.TICL.HADStep_cff import * from RecoHGCal.TICL.ticlLayerTileProducer_cfi import ticlLayerTileProducer -from RecoHGCal.TICL.ticlCandidateFromTrackstersProducer_cfi import ticlCandidateFromTrackstersProducer as _ticlCandidateFromTrackstersProducer from RecoHGCal.TICL.pfTICLProducer_cfi import pfTICLProducer as _pfTICLProducer from RecoHGCal.TICL.trackstersMergeProducer_cfi import trackstersMergeProducer as _trackstersMergeProducer from RecoHGCal.TICL.multiClustersFromTrackstersProducer_cfi import multiClustersFromTrackstersProducer as _multiClustersFromTrackstersProducer - ticlLayerTileTask = cms.Task(ticlLayerTileProducer) ticlTrackstersMerge = _trackstersMergeProducer.clone() @@ -19,23 +18,31 @@ ) ticlTracksterMergeTask = cms.Task(ticlTrackstersMerge, ticlMultiClustersFromTrackstersMerge) -ticlCandidateFromTracksters = _ticlCandidateFromTrackstersProducer.clone( - tracksterCollections = ["ticlTrackstersMerge"], - # A possible alternative for momentum computation: - # momentumPlugin = dict(plugin="TracksterP4FromTrackAndPCA") - ) + pfTICL = _pfTICLProducer.clone() -ticlPFTask = cms.Task(ticlCandidateFromTracksters, pfTICL) +ticlPFTask = cms.Task(pfTICL) iterTICLTask = cms.Task(ticlLayerTileTask - ,ticlMIPStepTask - ,ticlTrkStepTask + ,ticlTrkEMStepTask ,ticlEMStepTask + ,ticlTrkStepTask ,ticlHADStepTask ,ticlTracksterMergeTask ,ticlPFTask ) +ticlLayerTileHFNose = ticlLayerTileProducer.clone( + detector = 'HFNose' +) + +ticlLayerTileHFNoseTask = cms.Task(ticlLayerTileHFNose) + +iterHFNoseTICLTask = cms.Task( + ticlLayerTileHFNoseTask, + ticlHFNoseMIPStepTask, + ticlHFNoseEMStepTask +) + def injectTICLintoPF(process): if getattr(process,'particleFlowTmp', None): process.particleFlowTmp.src = ['particleFlowTmpBarrel', 'pfTICL'] diff --git a/RecoHGCal/TICL/python/ticl_iterations.py b/RecoHGCal/TICL/python/ticl_iterations.py index 09bd7f9e27597..4d54b16a6b8ab 100644 --- a/RecoHGCal/TICL/python/ticl_iterations.py +++ 
b/RecoHGCal/TICL/python/ticl_iterations.py @@ -39,11 +39,11 @@ def TICL_iterations_withReco(process): ) process.trackstersTrk = trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersTrk", "Trk"), + filtered_mask = "filteredLayerClustersTrk:Trk", seeding_regions = "ticlSeedingTrk", - missing_layers = 3, - min_clusters_per_ntuplet = 5, - min_cos_theta = 0.99, # ~10 degrees + skip_layers = 3, + min_layers_per_trackster = 5, + min_cos_theta = 0.99, # ~10 degrees min_cos_pointing = 0.9 ) @@ -64,10 +64,10 @@ def TICL_iterations_withReco(process): ) process.trackstersMIP = trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersMIP", "MIP"), + filtered_mask = "filteredLayerClustersMIP:MIP", seeding_regions = "ticlSeedingGlobal", - missing_layers = 3, - min_clusters_per_ntuplet = 15, + skip_layers = 3, + min_layers_per_trackster = 15, min_cos_theta = 0.99, # ~10 degrees min_cos_pointing = 0.9, out_in_dfs = False, @@ -89,10 +89,10 @@ def TICL_iterations_withReco(process): process.trackstersEM = trackstersProducer.clone( max_out_in_hops = 4, original_mask = "trackstersMIP", - filtered_mask = cms.InputTag("filteredLayerClusters", "algo8"), + filtered_mask = "filteredLayerClusters:algo8", seeding_regions = "ticlSeedingGlobal", - missing_layers = 1, - min_clusters_per_ntuplet = 10, + skip_layers = 1, + min_layers_per_trackster = 10, min_cos_theta = 0.984, # ~10 degrees min_cos_pointing = 0.9 # ~26 degrees ) @@ -103,11 +103,11 @@ def TICL_iterations_withReco(process): process.trackstersHAD = trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClusters", "algo8"), + filtered_mask = "filteredLayerClusters:algo8", seeding_regions = "ticlSeedingGlobal", - missing_layers = 2, - min_clusters_per_ntuplet = 10, - min_cos_theta = 0.8, + skip_layers = 2, + min_layers_per_trackster = 10, + min_cos_theta = 0.8, min_cos_pointing = 0.7 ) @@ -142,13 +142,13 @@ def TICL_iterations_withReco(process): process.ticlPFValidation = ticlPFValidation process.hgcalValidation.insert(-1, process.ticlPFValidation) - + if getattr(process,'hgcalValidator'): - process.hgcalValidator.label_lcl = cms.InputTag("hgcalLayerClusters") - process.hgcalValidator.label_mcl = cms.VInputTag(cms.InputTag("multiClustersFromTrackstersEM", "MultiClustersFromTracksterByCA"), cms.InputTag("multiClustersFromTrackstersHAD", "MultiClustersFromTracksterByCA")) + process.hgcalValidator.label_lcl = "hgcalLayerClusters" + process.hgcalValidator.label_mcl = ["multiClustersFromTrackstersEM:MultiClustersFromTracksterByCA", "multiClustersFromTrackstersHAD:MultiClustersFromTracksterByCA"] process.hgcalValidator.domulticlustersPlots = True - + return process @@ -171,10 +171,10 @@ def TICL_iterations(process): ) process.trackstersMIP = trackstersProducer.clone( - filtered_mask = cms.InputTag("filteredLayerClustersMIP", "MIP"), + filtered_mask = "filteredLayerClustersMIP:MIP", seeding_regions = "ticlSeedingGlobal", - missing_layers = 3, - min_clusters_per_ntuplet = 15, + skip_layers = 3, + min_layers_per_trackster = 15, min_cos_theta = 0.99, # ~10 degrees ) @@ -192,10 +192,10 @@ def TICL_iterations(process): process.tracksters = trackstersProducer.clone( original_mask = "trackstersMIP", - filtered_mask = cms.InputTag("filteredLayerClusters", "algo8"), + filtered_mask = "filteredLayerClusters:algo8", seeding_regions = "ticlSeedingGlobal", - missing_layers = 2, - min_clusters_per_ntuplet = 15, + skip_layers = 2, + min_layers_per_trackster = 15, min_cos_theta = 0.94, # ~20 degrees min_cos_pointing = 0.7 
) diff --git a/RecoLocalCalo/HGCalRecAlgos/interface/HGCal3DClustering.h b/RecoLocalCalo/HGCalRecAlgos/interface/HGCal3DClustering.h index e12d2620d064c..2eaec6c080e93 100644 --- a/RecoLocalCalo/HGCalRecAlgos/interface/HGCal3DClustering.h +++ b/RecoLocalCalo/HGCalRecAlgos/interface/HGCal3DClustering.h @@ -10,7 +10,6 @@ #include "RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h" #include "RecoLocalCalo/HGCalRecAlgos/interface/ClusterTools.h" -#include "RecoLocalCalo/HGCalRecProducers/interface/HGCalClusteringAlgoBase.h" #include "CommonTools/RecoAlgos/interface/KDTreeLinkerAlgo.h" @@ -36,7 +35,9 @@ class HGCal3DClustering { void getEvent(const edm::Event& ev) { clusterTools->getEvent(ev); } void getEventSetup(const edm::EventSetup& es) { clusterTools->getEventSetup(es); - rhtools_.getEventSetup(es); + edm::ESHandle geom; + es.get().get(geom); + rhtools_.setGeometry(*geom); maxlayer = rhtools_.lastLayerBH(); points.clear(); minpos.clear(); diff --git a/RecoLocalCalo/HGCalRecAlgos/interface/HGCalDepthPreClusterer.h b/RecoLocalCalo/HGCalRecAlgos/interface/HGCalDepthPreClusterer.h index 1be09da6c8251..ad74c02963802 100644 --- a/RecoLocalCalo/HGCalRecAlgos/interface/HGCalDepthPreClusterer.h +++ b/RecoLocalCalo/HGCalRecAlgos/interface/HGCalDepthPreClusterer.h @@ -35,7 +35,9 @@ class HGCalDepthPreClusterer { void getEvent(const edm::Event& ev) { clusterTools->getEvent(ev); } void getEventSetup(const edm::EventSetup& es) { clusterTools->getEventSetup(es); - rhtools_.getEventSetup(es); + edm::ESHandle geom; + es.get().get(geom); + rhtools_.setGeometry(*geom); } typedef std::vector ClusterCollection; diff --git a/RecoLocalCalo/HGCalRecAlgos/interface/HGCalRecHitAbsAlgo.h b/RecoLocalCalo/HGCalRecAlgos/interface/HGCalRecHitAbsAlgo.h index 66611fca86844..d880716869f09 100644 --- a/RecoLocalCalo/HGCalRecAlgos/interface/HGCalRecHitAbsAlgo.h +++ b/RecoLocalCalo/HGCalRecAlgos/interface/HGCalRecHitAbsAlgo.h @@ -22,7 +22,7 @@ class HGCalRecHitAbsAlgo { /// Destructor virtual ~HGCalRecHitAbsAlgo(){}; - inline void set(const edm::EventSetup& es) { rhtools_.getEventSetup(es); } + inline void set(const CaloGeometry& geom) { rhtools_.setGeometry(geom); } /// make rechits from dataframes virtual void setLayerWeights(const std::vector& weights){}; diff --git a/RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h b/RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h index 2ef4de13a1cf0..f18c1cafb8d33 100644 --- a/RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h +++ b/RecoLocalCalo/HGCalRecAlgos/interface/RecHitTools.h @@ -7,6 +7,8 @@ #include "DataFormats/DetId/interface/DetId.h" #include "DataFormats/ForwardDetId/interface/ForwardSubdetector.h" #include "DataFormats/ForwardDetId/interface/HFNoseDetId.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" +#include "Geometry/Records/interface/CaloGeometryRecord.h" class CaloGeometry; class CaloSubdetectorGeometry; @@ -23,8 +25,7 @@ namespace hgcal { RecHitTools() : geom_(nullptr), fhOffset_(0), bhOffset_(0), fhLastLayer_(0), noseLastLayer_(0), geometryType_(0) {} ~RecHitTools() {} - void getEvent(const edm::Event&); - void getEventSetup(const edm::EventSetup&); + void setGeometry(CaloGeometry const&); const CaloSubdetectorGeometry* getSubdetectorGeometry(const DetId& id) const; GlobalPoint getPosition(const DetId& id) const; diff --git a/RecoLocalCalo/HGCalRecAlgos/src/ClusterTools.cc b/RecoLocalCalo/HGCalRecAlgos/src/ClusterTools.cc index 778db1264fb31..098c5b3990510 100644 --- a/RecoLocalCalo/HGCalRecAlgos/src/ClusterTools.cc +++ 
b/RecoLocalCalo/HGCalRecAlgos/src/ClusterTools.cc @@ -5,6 +5,7 @@ #include "DataFormats/HcalDetId/interface/HcalSubdetector.h" #include "Geometry/HGCalGeometry/interface/HGCalGeometry.h" #include "Geometry/Records/interface/IdealGeometryRecord.h" +#include "Geometry/CaloGeometry/interface/CaloGeometry.h" #include "FWCore/Framework/interface/ESHandle.h" @@ -25,7 +26,6 @@ ClusterTools::ClusterTools(const edm::ParameterSet& conf, edm::ConsumesCollector bhtok(sumes.consumes(conf.getParameter("HGCBHInput"))) {} void ClusterTools::getEvent(const edm::Event& ev) { - rhtools_.getEvent(ev); edm::Handle temp; ev.getByToken(eetok, temp); eerh_ = temp.product(); @@ -35,7 +35,11 @@ void ClusterTools::getEvent(const edm::Event& ev) { bhrh_ = temp.product(); } -void ClusterTools::getEventSetup(const edm::EventSetup& es) { rhtools_.getEventSetup(es); } +void ClusterTools::getEventSetup(const edm::EventSetup& es) { + edm::ESHandle geom; + es.get().get(geom); + rhtools_.setGeometry(*geom); +} float ClusterTools::getClusterHadronFraction(const reco::CaloCluster& clus) const { float energy = 0.f, energyHad = 0.f; diff --git a/RecoLocalCalo/HGCalRecAlgos/src/RecHitTools.cc b/RecoLocalCalo/HGCalRecAlgos/src/RecHitTools.cc index fafea478bce5b..2d99384d5b197 100644 --- a/RecoLocalCalo/HGCalRecAlgos/src/RecHitTools.cc +++ b/RecoLocalCalo/HGCalRecAlgos/src/RecHitTools.cc @@ -65,13 +65,8 @@ namespace { } // namespace -void RecHitTools::getEvent(const edm::Event& ev) {} - -void RecHitTools::getEventSetup(const edm::EventSetup& es) { - edm::ESHandle geom; - es.get().get(geom); - - geom_ = geom.product(); +void RecHitTools::setGeometry(const CaloGeometry& geom) { + geom_ = &geom; unsigned int wmaxEE(0), wmaxFH(0); auto geomEE = static_cast( geom_->getSubdetectorGeometry(DetId::HGCalEE, ForwardSubdetector::ForwardEmpty)); @@ -364,6 +359,7 @@ unsigned int RecHitTools::getLayerWithOffset(const DetId& id) const { } else if (id.det() == DetId::Hcal && id.subdetId() == HcalEndcap) { layer += bhOffset_; } + // no need to add offset for HFnose return layer; } @@ -417,13 +413,12 @@ bool RecHitTools::isHalfCell(const DetId& id) const { } bool RecHitTools::isSilicon(const DetId& id) const { - bool issilicon = false; - if (id.det() == DetId::HGCalEE || id.det() == DetId::HGCalHSi) - issilicon = true; - return issilicon; + return (id.det() == DetId::HGCalEE || id.det() == DetId::HGCalHSi || + (id.det() == DetId::Forward && id.subdetId() == static_cast(HFNose))); } bool RecHitTools::isOnlySilicon(const unsigned int layer) const { + // HFnose TODO bool isonlysilicon = (layer % bhLastLayer_) < bhOffset_; return isonlysilicon; } diff --git a/RecoLocalCalo/HGCalRecProducers/interface/HGCalClusteringAlgoBase.h b/RecoLocalCalo/HGCalRecProducers/interface/HGCalClusteringAlgoBase.h index 3d7dbf4d7b0c0..11dd849ea1f80 100644 --- a/RecoLocalCalo/HGCalRecProducers/interface/HGCalClusteringAlgoBase.h +++ b/RecoLocalCalo/HGCalRecProducers/interface/HGCalClusteringAlgoBase.h @@ -59,7 +59,9 @@ class HGCalClusteringAlgoBase { virtual void getEventSetupPerAlgorithm(const edm::EventSetup &es) {} inline void getEventSetup(const edm::EventSetup &es) { - rhtools_.getEventSetup(es); + edm::ESHandle geom; + es.get().get(geom); + rhtools_.setGeometry(*geom); maxlayer_ = rhtools_.lastLayer(isNose_); lastLayerEE_ = rhtools_.lastLayerEE(isNose_); lastLayerFH_ = rhtools_.lastLayerFH(); diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.cc b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.cc index 54a92afcf5cee..b6a7739080dfb 100644 --- 
a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.cc +++ b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.cc @@ -44,8 +44,12 @@ void HGCalCLUEAlgoT::populate(const HGCRecHitCollection& hits) { if (dependSensor_) { int thickness_index = rhtools_.getSiThickIndex(detid); if (thickness_index == -1) - thickness_index = 3; + thickness_index = maxNumberOfThickIndices_; + double storedThreshold = thresholds_[layerOnSide][thickness_index]; + if (detid.det() == DetId::HGCalHSi || detid.subdetId() == HGCHEF) { + storedThreshold = thresholds_[layerOnSide][thickness_index + deltasi_index_regemfac_]; + } sigmaNoise = v_sigmaNoise_[layerOnSide][thickness_index]; if (hgrh.energy() < storedThreshold) @@ -210,6 +214,9 @@ math::XYZPoint HGCalCLUEAlgoT::calculatePosition(const std::vector& v, c float x_log = 0.f; float y_log = 0.f; for (auto i : v) { + //for silicon only just use 1+6 cells = 1.3cm for all thicknesses + if (distance2(i, maxEnergyIndex, layerId, false) > positionDeltaRho2_) + continue; float rhEnergy = cellsOnLayer.weight[i]; float Wi = std::max(thresholdW0_[thick] + std::log(rhEnergy / total_weight), 0.); x_log += cellsOnLayer.x[i] * Wi; @@ -507,8 +514,9 @@ void HGCalCLUEAlgoT::computeThreshold() { // To support the TDR geometry and also the post-TDR one (v9 onwards), we // need to change the logic of the vectors containing signal to noise and // thresholds. The first 3 indices will keep on addressing the different - // thicknesses of the Silicon detectors, while the last one, number 3 (the - // fourth) will address the Scintillators. This change will support both + // thicknesses of the Silicon detectors in CE_E , the next 3 indices will address + // the thicknesses of the Silicon detectors in CE_H, while the last one, number 6 (the + // seventh) will address the Scintillators. This change will support both // geometries at the same time. 
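Note for reviewers: the comment above pins down the per-layer threshold layout: columns 0-2 hold the three silicon thicknesses in CE-E, columns 3-5 the same thicknesses in CE-H (reached by adding deltasi_index_regemfac, default 3, as populate() does), and column 6 (maxNumberOfThickIndices) the scintillator. A standalone sketch of that lookup; the threshold numbers are invented and the CE-H detector test is reduced to a boolean for brevity:

#include <cstdio>
#include <vector>

// Index layout per layer, as described in the comment above:
//   [0..2]  silicon thicknesses in CE-E
//   [3..5]  silicon thicknesses in CE-H (offset by deltasi_index_regemfac)
//   [6]     scintillator
constexpr int kDeltaSiIndexRegEmFac = 3;    // cfg default
constexpr int kMaxNumberOfThickIndices = 6; // cfg default

float lookupThreshold(const std::vector<std::vector<float>>& thresholds,
                      int layerOnSide,
                      int thicknessIndex,  // -1 means "no silicon thickness" -> scintillator slot
                      bool isCEHSilicon) {
  if (thicknessIndex == -1)
    thicknessIndex = kMaxNumberOfThickIndices;  // scintillator entry
  int column = thicknessIndex;
  if (isCEHSilicon && thicknessIndex != kMaxNumberOfThickIndices)
    column += kDeltaSiIndexRegEmFac;  // shift into the CE-H silicon block
  return thresholds[layerOnSide][column];
}

int main() {
  // One layer, seven columns, invented threshold values.
  std::vector<std::vector<float>> thresholds{
      {0.010f, 0.012f, 0.015f, 0.020f, 0.024f, 0.030f, 0.050f}};
  std::printf("CE-E silicon, idx 1: %.3f\n", lookupThreshold(thresholds, 0, 1, false));
  std::printf("CE-H silicon, idx 1: %.3f\n", lookupThreshold(thresholds, 0, 1, true));
  std::printf("scintillator       : %.3f\n", lookupThreshold(thresholds, 0, -1, false));
  return 0;
}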
if (initialized_) @@ -517,13 +525,13 @@ void HGCalCLUEAlgoT::computeThreshold() { initialized_ = true; std::vector dummy; - const unsigned maxNumberOfThickIndices = 3; - dummy.resize(maxNumberOfThickIndices + !isNose_, 0); // +1 to accomodate for the Scintillators + + dummy.resize(maxNumberOfThickIndices_ + !isNose_, 0); // +1 to accomodate for the Scintillators thresholds_.resize(maxlayer_, dummy); v_sigmaNoise_.resize(maxlayer_, dummy); for (unsigned ilayer = 1; ilayer <= maxlayer_; ++ilayer) { - for (unsigned ithick = 0; ithick < maxNumberOfThickIndices; ++ithick) { + for (unsigned ithick = 0; ithick < maxNumberOfThickIndices_; ++ithick) { float sigmaNoise = 0.001f * fcPerEle_ * nonAgedNoises_[ithick] * dEdXweights_[ilayer] / (fcPerMip_[ithick] * thicknessCorrection_[ithick]); thresholds_[ilayer - 1][ithick] = sigmaNoise * ecut_; @@ -535,9 +543,9 @@ void HGCalCLUEAlgoT::computeThreshold() { } if (!isNose_) { - float scintillators_sigmaNoise = 0.001f * noiseMip_ * dEdXweights_[ilayer]; - thresholds_[ilayer - 1][maxNumberOfThickIndices] = ecut_ * scintillators_sigmaNoise; - v_sigmaNoise_[ilayer - 1][maxNumberOfThickIndices] = scintillators_sigmaNoise; + float scintillators_sigmaNoise = 0.001f * noiseMip_ * dEdXweights_[ilayer] / sciThicknessCorrection_; + thresholds_[ilayer - 1][maxNumberOfThickIndices_] = ecut_ * scintillators_sigmaNoise; + v_sigmaNoise_[ilayer - 1][maxNumberOfThickIndices_] = scintillators_sigmaNoise; LogDebug("HGCalCLUEAlgo") << "ilayer: " << ilayer << " noiseMip: " << noiseMip_ << " scintillators_sigmaNoise: " << scintillators_sigmaNoise << "\n"; } diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.h b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.h index bfca51b3c0384..da3167bbef694 100644 --- a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.h +++ b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalCLUEAlgo.h @@ -34,15 +34,19 @@ class HGCalCLUEAlgoT : public HGCalClusteringAlgoBase { (HGCalClusteringAlgoBase::VerbosityLevel)ps.getUntrackedParameter("verbosity", 3), reco::CaloCluster::undefined), thresholdW0_(ps.getParameter>("thresholdW0")), + positionDeltaRho2_(ps.getParameter("positionDeltaRho2")), vecDeltas_(ps.getParameter>("deltac")), kappa_(ps.getParameter("kappa")), ecut_(ps.getParameter("ecut")), dependSensor_(ps.getParameter("dependSensor")), dEdXweights_(ps.getParameter>("dEdXweights")), thicknessCorrection_(ps.getParameter>("thicknessCorrection")), + sciThicknessCorrection_(ps.getParameter("sciThicknessCorrection")), + deltasi_index_regemfac_(ps.getParameter("deltasi_index_regemfac")), + maxNumberOfThickIndices_(ps.getParameter("maxNumberOfThickIndices")), fcPerMip_(ps.getParameter>("fcPerMip")), fcPerEle_(ps.getParameter("fcPerEle")), - nonAgedNoises_(ps.getParameter("noises").getParameter>("values")), + nonAgedNoises_(ps.getParameter>("noises")), noiseMip_(ps.getParameter("noiseMip").getParameter("noise_MIP")), use2x2_(ps.getParameter("use2x2")), initialized_(false) {} @@ -81,6 +85,7 @@ class HGCalCLUEAlgoT : public HGCalClusteringAlgoBase { static void fillPSetDescription(edm::ParameterSetDescription& iDesc) { iDesc.add>("thresholdW0", {2.9, 2.9, 2.9}); + iDesc.add("positionDeltaRho2", 1.69); iDesc.add>("deltac", { 1.3, @@ -94,11 +99,12 @@ class HGCalCLUEAlgoT : public HGCalClusteringAlgoBase { iDesc.addUntracked("verbosity", 3); iDesc.add>("dEdXweights", {}); iDesc.add>("thicknessCorrection", {}); + iDesc.add("sciThicknessCorrection", 0.9); + iDesc.add("deltasi_index_regemfac", 3); + iDesc.add("maxNumberOfThickIndices", 6); 
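Note for reviewers: positionDeltaRho2 (default 1.69, i.e. a 1.3 cm radius around the highest-energy cell, per the calculatePosition comment above) restricts the log-energy-weighted position sum to the cluster core. A self-contained 2-D version of that weighting, with thresholdW0 = 2.9 from the defaults above and invented cells:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct Cell {
  float x, y, energy;
};

// Log-energy-weighted barycenter restricted to cells within sqrt(deltaRho2)
// of the seed (highest-energy) cell; a standalone sketch of the scheme used
// in HGCalCLUEAlgoT::calculatePosition, not the CMSSW implementation.
void logWeightedPosition(const std::vector<Cell>& cells, float thresholdW0, float deltaRho2) {
  const auto seed = std::max_element(
      cells.begin(), cells.end(), [](const Cell& a, const Cell& b) { return a.energy < b.energy; });
  float total = 0.f;
  for (const auto& c : cells)
    total += c.energy;

  float sumW = 0.f, x = 0.f, y = 0.f;
  for (const auto& c : cells) {
    const float dx = c.x - seed->x, dy = c.y - seed->y;
    if (dx * dx + dy * dy > deltaRho2)
      continue;  // outside the 1.3 cm core: ignored for the position
    const float w = std::max(thresholdW0 + std::log(c.energy / total), 0.f);
    x += c.x * w;
    y += c.y * w;
    sumW += w;
  }
  if (sumW > 0.f)
    std::printf("position = (%.3f, %.3f) cm\n", x / sumW, y / sumW);
}

int main() {
  // Invented cells: a dense core plus one far-away low-energy cell.
  std::vector<Cell> cells{{0.0f, 0.0f, 10.f}, {0.6f, 0.2f, 4.f}, {-0.4f, 0.5f, 3.f}, {5.0f, 5.0f, 0.5f}};
  logWeightedPosition(cells, /*thresholdW0=*/2.9f, /*deltaRho2=*/1.69f);
  return 0;
}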
iDesc.add>("fcPerMip", {}); iDesc.add("fcPerEle", 0.0); - edm::ParameterSetDescription descNestedNoises; - descNestedNoises.add>("values", {}); - iDesc.add("noises", descNestedNoises); + iDesc.add>("noises", {}); edm::ParameterSetDescription descNestedNoiseMIP; descNestedNoiseMIP.add("scaleByDose", false); descNestedNoiseMIP.add("scaleByDoseAlgo", 0); @@ -114,6 +120,7 @@ class HGCalCLUEAlgoT : public HGCalClusteringAlgoBase { private: // To compute the cluster position std::vector thresholdW0_; + const double positionDeltaRho2_; // The two parameters used to identify clusters std::vector vecDeltas_; @@ -130,6 +137,9 @@ class HGCalCLUEAlgoT : public HGCalClusteringAlgoBase { bool dependSensor_; std::vector dEdXweights_; std::vector thicknessCorrection_; + double sciThicknessCorrection_; + int deltasi_index_regemfac_; + unsigned maxNumberOfThickIndices_; std::vector fcPerMip_; double fcPerEle_; std::vector nonAgedNoises_; diff --git a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalRecHitWorkerSimple.cc b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalRecHitWorkerSimple.cc index fffceca3877fa..24c1139719d2b 100644 --- a/RecoLocalCalo/HGCalRecProducers/plugins/HGCalRecHitWorkerSimple.cc +++ b/RecoLocalCalo/HGCalRecProducers/plugins/HGCalRecHitWorkerSimple.cc @@ -1,16 +1,19 @@ #include "RecoLocalCalo/HGCalRecProducers/plugins/HGCalRecHitWorkerSimple.h" -#include "DataFormats/ForwardDetId/interface/HGCalDetId.h" + +#include + +#include "CommonTools/Utils/interface/StringToEnumValue.h" #include "DataFormats/ForwardDetId/interface/ForwardSubdetector.h" -#include "FWCore/Framework/interface/EventSetup.h" +#include "DataFormats/ForwardDetId/interface/HGCalDetId.h" +#include "DataFormats/HcalDetId/interface/HcalSubdetector.h" #include "FWCore/Framework/interface/Event.h" +#include "FWCore/Framework/interface/EventSetup.h" #include "FWCore/MessageLogger/interface/MessageLogger.h" -#include "CommonTools/Utils/interface/StringToEnumValue.h" -#include "DataFormats/HcalDetId/interface/HcalSubdetector.h" #include "RecoLocalCalo/HGCalRecProducers/interface/ComputeClusterTime.h" HGCalRecHitWorkerSimple::HGCalRecHitWorkerSimple(const edm::ParameterSet& ps) : HGCalRecHitWorkerBaseClass(ps) { - rechitMaker_.reset(new HGCalRecHitSimpleAlgo()); - tools_.reset(new hgcal::RecHitTools()); + rechitMaker_ = std::make_unique(); + tools_ = std::make_unique(); constexpr float keV2GeV = 1e-6; // HGCee constants hgcEE_keV2DIGI_ = ps.getParameter("HGCEE_keV2DIGI"); @@ -56,7 +59,8 @@ HGCalRecHitWorkerSimple::HGCalRecHitWorkerSimple(const edm::ParameterSet& ps) : rcorr_.push_back(1.0 / corr); } // here for scintillator - rcorrscint_ = ps.getParameter("sciThicknessCorrection"); + rcorrscint_ = 1.0 / ps.getParameter("sciThicknessCorrection"); + //This is for the index position in CE_H silicon thickness cases deltasi_index_regemfac_ = ps.getParameter("deltasi_index_regemfac"); const auto& rcorrnose = ps.getParameter >("thicknessNoseCorrection"); @@ -87,8 +91,10 @@ HGCalRecHitWorkerSimple::HGCalRecHitWorkerSimple(const edm::ParameterSet& ps) : } void HGCalRecHitWorkerSimple::set(const edm::EventSetup& es) { - tools_->getEventSetup(es); - rechitMaker_->set(es); + edm::ESHandle geom; + es.get().get(geom); + tools_->setGeometry(*geom); + rechitMaker_->set(*geom); if (hgcEE_isSiFE_) { edm::ESHandle hgceeGeoHandle; es.get().get("HGCalEESensitive", hgceeGeoHandle); @@ -180,7 +186,7 @@ bool HGCalRecHitWorkerSimple::run(const edm::Event& evt, break; case hgcbh: rechitMaker_->setADCToGeVConstant(float(hgchebUncalib2GeV_)); - sigmaNoiseGeV = 
1e-3 * hgcHEB_noise_MIP_ * weights_[layer]; + sigmaNoiseGeV = 1e-3 * hgcHEB_noise_MIP_ * weights_[layer] * rcorrscint_; break; case hgchfnose: rechitMaker_->setADCToGeVConstant(float(hgchfnoseUncalib2GeV_)); diff --git a/RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py b/RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py index 222073a7f9eb9..5ed92d94ed915 100644 --- a/RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py +++ b/RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py @@ -6,29 +6,37 @@ from RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi import HGCalUncalibRecHit -from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import fC_per_ele, hgceeDigitizer, hgchebackDigitizer, hfnoseDigitizer +from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import fC_per_ele, HGCAL_noises, hgceeDigitizer, hgchebackDigitizer, hfnoseDigitizer -hgcalLayerClusters = hgcalLayerClusters_.clone() - -hgcalLayerClusters.timeOffset = hgceeDigitizer.tofDelay -hgcalLayerClusters.plugin.dEdXweights = cms.vdouble(dEdX.weights) -hgcalLayerClusters.plugin.fcPerMip = cms.vdouble(HGCalUncalibRecHit.HGCEEConfig.fCPerMIP) -hgcalLayerClusters.plugin.thicknessCorrection = cms.vdouble(HGCalRecHit.thicknessCorrection) -hgcalLayerClusters.plugin.fcPerEle = cms.double(fC_per_ele) -hgcalLayerClusters.plugin.noises = cms.PSet(refToPSet_ = cms.string('HGCAL_noises')) -hgcalLayerClusters.plugin.noiseMip = hgchebackDigitizer.digiCfg.noise +hgcalLayerClusters = hgcalLayerClusters_.clone( + timeOffset = hgceeDigitizer.tofDelay, + plugin = dict( + dEdXweights = dEdX.weights.value(), + #With the introduction of 7 regional factors (6 for silicon plus 1 for scintillator), + #we extend fcPerMip (along with noises below) so that it is guaranteed that they have 6 entries. + fcPerMip = HGCalUncalibRecHit.HGCEEConfig.fCPerMIP.value() + HGCalUncalibRecHit.HGCHEFConfig.fCPerMIP.value(), + thicknessCorrection = HGCalRecHit.thicknessCorrection.value(), + sciThicknessCorrection = HGCalRecHit.sciThicknessCorrection.value(), + deltasi_index_regemfac = HGCalRecHit.deltasi_index_regemfac.value(), + fcPerEle = fC_per_ele, + #Extending noises as fcPerMip, see comment above. 
+ noises = HGCAL_noises.values.value() + HGCAL_noises.values.value(), + noiseMip = hgchebackDigitizer.digiCfg.noise.value() + ) +) hgcalLayerClustersHFNose = hgcalLayerClusters_.clone( detector = 'HFNose', - timeOffset = hfnoseDigitizer.tofDelay, - nHitsTime = cms.uint32(3), + timeOffset = hfnoseDigitizer.tofDelay.value(), + nHitsTime = 3, plugin = dict( - dEdXweights = dEdX.weightsNose, - fcPerMip = HGCalUncalibRecHit.HGCHFNoseConfig.fCPerMIP, - thicknessCorrection = HGCalRecHit.thicknessNoseCorrection, + dEdXweights = dEdX.weightsNose.value(), + maxNumberOfThickIndices = 3, + fcPerMip = HGCalUncalibRecHit.HGCHFNoseConfig.fCPerMIP.value(), + thicknessCorrection = HGCalRecHit.thicknessNoseCorrection.value(), fcPerEle = fC_per_ele, - noises = dict(refToPSet_ = cms.string('HGCAL_noises')), - noiseMip = hgchebackDigitizer.digiCfg.noise + noises = HGCAL_noises.values.value(), + noiseMip = hgchebackDigitizer.digiCfg.noise.value() ) ) diff --git a/RecoParticleFlow/PFClusterProducer/interface/PFHGCalRecHitCreator.h b/RecoParticleFlow/PFClusterProducer/interface/PFHGCalRecHitCreator.h index 3809c1ec02500..73d4a8197bad4 100644 --- a/RecoParticleFlow/PFClusterProducer/interface/PFHGCalRecHitCreator.h +++ b/RecoParticleFlow/PFClusterProducer/interface/PFHGCalRecHitCreator.h @@ -36,7 +36,9 @@ class PFHGCalRecHitCreator : public PFRecHitCreatorBase { const edm::Event& iEvent, const edm::EventSetup& iSetup) override { // Setup RecHitTools to properly compute the position of the HGCAL Cells vie their DetIds - recHitTools_.getEventSetup(iSetup); + edm::ESHandle geoHandle; + iSetup.get().get(geoHandle); + recHitTools_.setGeometry(*geoHandle); for (unsigned int i = 0; i < qualityTests_.size(); ++i) { qualityTests_.at(i)->beginEvent(iEvent, iSetup); @@ -46,8 +48,6 @@ class PFHGCalRecHitCreator : public PFRecHitCreatorBase { iEvent.getByToken(recHitToken_, recHitHandle); const HGCRecHitCollection& rechits = *recHitHandle; - edm::ESHandle geoHandle; - iSetup.get().get(geoHandle); const CaloGeometry* geom = geoHandle.product(); unsigned skipped_rechits = 0; diff --git a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterFromHGCalMultiCluster.cc b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterFromHGCalMultiCluster.cc index 33d6362ce04f4..bd30a46aaec10 100644 --- a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterFromHGCalMultiCluster.cc +++ b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterFromHGCalMultiCluster.cc @@ -30,6 +30,11 @@ void PFClusterFromHGCalMultiCluster::buildClusters(const edm::Handle geom; + es.get().get(geom); + rhtools_.setGeometry(*geom); +} void RealisticSimClusterMapper::buildClusters(const edm::Handle& input, const std::vector& rechitMask, diff --git a/Validation/HGCalValidation/plugins/HGCalHitCalibration.cc b/Validation/HGCalValidation/plugins/HGCalHitCalibration.cc index bf9fb01ef0da9..2d3e08ba1fe14 100644 --- a/Validation/HGCalValidation/plugins/HGCalHitCalibration.cc +++ b/Validation/HGCalValidation/plugins/HGCalHitCalibration.cc @@ -191,7 +191,9 @@ void HGCalHitCalibration::fillWithRecHits(std::map& hit void HGCalHitCalibration::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { using namespace edm; - recHitTools_.getEventSetup(iSetup); + edm::ESHandle geom; + iSetup.get().get(geom); + recHitTools_.setGeometry(*geom); Handle recHitHandleEE; Handle recHitHandleFH; diff --git a/Validation/HGCalValidation/plugins/HGCalShowerSeparation.cc b/Validation/HGCalValidation/plugins/HGCalShowerSeparation.cc index 5cad7d1ff7abd..34b209edc99a6 100644 --- 
a/Validation/HGCalValidation/plugins/HGCalShowerSeparation.cc +++ b/Validation/HGCalValidation/plugins/HGCalShowerSeparation.cc @@ -166,7 +166,9 @@ void HGCalShowerSeparation::bookHistograms(DQMStore::IBooker& ibooker, void HGCalShowerSeparation::analyze(const edm::Event& iEvent, const edm::EventSetup& iSetup) { using namespace edm; - recHitTools_.getEventSetup(iSetup); + edm::ESHandle geom; + iSetup.get().get(geom); + recHitTools_.setGeometry(*geom); Handle recHitHandleEE; Handle recHitHandleFH; diff --git a/Validation/HGCalValidation/plugins/HGCalValidator.cc b/Validation/HGCalValidation/plugins/HGCalValidator.cc index 8752281a0996f..13cf7095454fe 100644 --- a/Validation/HGCalValidation/plugins/HGCalValidator.cc +++ b/Validation/HGCalValidation/plugins/HGCalValidator.cc @@ -177,7 +177,9 @@ void HGCalValidator::dqmAnalyze(const edm::Event& event, event.getByToken(label_cp_effic, caloParticleHandle); std::vector const& caloParticles = *caloParticleHandle; - tools_->getEventSetup(setup); + edm::ESHandle geom; + setup.get().get(geom); + tools_->setGeometry(*geom); histoProducerAlgo_->setRecHitTools(tools_); edm::Handle recHitHandleEE; diff --git a/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc b/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc index 86e1ae1aa318f..aa875dd282edd 100644 --- a/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc +++ b/Validation/HGCalValidation/src/HGVHistoProducerAlgo.cc @@ -1726,292 +1726,287 @@ void HGVHistoProducerAlgo::multiClusters_to_CaloParticles(const Histograms& hist for (unsigned int mclId = 0; mclId < nMultiClusters; ++mclId) { const auto& hits_and_fractions = multiClusters[mclId].hitsAndFractions(); - std::unordered_map CPEnergyInMCL; - int maxCPId_byNumberOfHits = -1; - unsigned int maxCPNumberOfHitsInMCL = 0; - int maxCPId_byEnergy = -1; - float maxEnergySharedMCLandCP = 0.f; - float energyFractionOfMCLinCP = 0.f; - float energyFractionOfCPinMCL = 0.f; - - //In case of matched rechit-simhit, so matched - //caloparticle-layercluster-multicluster, he counts and saves the number of - //rechits related to the maximum energy CaloParticle out of all - //CaloParticles related to that layer cluster and multicluster. - - std::unordered_map occurrencesCPinMCL; - unsigned int numberOfNoiseHitsInMCL = 0; - unsigned int numberOfHaloHitsInMCL = 0; - unsigned int numberOfHitsInMCL = 0; - - //number of hits related to that cluster. - unsigned int numberOfHitsInLC = hits_and_fractions.size(); - numberOfHitsInMCL += numberOfHitsInLC; - std::unordered_map CPEnergyInLC; - - //hitsToCaloParticleId is a vector of ints, one for each rechit of the - //layer cluster under study. If negative, there is no simhit from any CaloParticle related. - //If positive, at least one CaloParticle has been found with matched simhit. - //In more detail: - // 1. hitsToCaloParticleId[hitId] = -3 - // TN: These represent Halo Cells(N) that have not been - // assigned to any CaloParticle (hence the T). - // 2. hitsToCaloParticleId[hitId] = -2 - // FN: There represent Halo Cells(N) that have been assigned - // to a CaloParticle (hence the F, since those should have not been marked as halo) - // 3. hitsToCaloParticleId[hitId] = -1 - // FP: These represent Real Cells(P) that have not been - // assigned to any CaloParticle (hence the F, since these are fakes) - // 4. 
hitsToCaloParticleId[hitId] >= 0 - // TP There represent Real Cells(P) that have been assigned - // to a CaloParticle (hence the T) - - std::vector hitsToCaloParticleId(numberOfHitsInLC); - //det id of the first hit just to make the lcLayerId variable - //which maps the layers in -z: 0->51 and in +z: 52->103 - const auto firstHitDetId = hits_and_fractions[0].first; - int lcLayerId = - recHitTools_->getLayerWithOffset(firstHitDetId) + layers * ((recHitTools_->zside(firstHitDetId) + 1) >> 1) - 1; + if (!hits_and_fractions.empty()) { + std::unordered_map CPEnergyInMCL; + int maxCPId_byNumberOfHits = -1; + unsigned int maxCPNumberOfHitsInMCL = 0; + int maxCPId_byEnergy = -1; + float maxEnergySharedMCLandCP = 0.f; + float energyFractionOfMCLinCP = 0.f; + float energyFractionOfCPinMCL = 0.f; + + //In case of matched rechit-simhit, so matched + //caloparticle-layercluster-multicluster, he counts and saves the number of + //rechits related to the maximum energy CaloParticle out of all + //CaloParticles related to that layer cluster and multicluster. + + std::unordered_map occurrencesCPinMCL; + unsigned int numberOfNoiseHitsInMCL = 0; + unsigned int numberOfHaloHitsInMCL = 0; + unsigned int numberOfHitsInMCL = 0; + + //number of hits related to that cluster. + unsigned int numberOfHitsInLC = hits_and_fractions.size(); + numberOfHitsInMCL += numberOfHitsInLC; + std::unordered_map CPEnergyInLC; + + //hitsToCaloParticleId is a vector of ints, one for each rechit of the + //layer cluster under study. If negative, there is no simhit from any CaloParticle related. + //If positive, at least one CaloParticle has been found with matched simhit. + //In more detail: + // 1. hitsToCaloParticleId[hitId] = -3 + // TN: These represent Halo Cells(N) that have not been + // assigned to any CaloParticle (hence the T). + // 2. hitsToCaloParticleId[hitId] = -2 + // FN: There represent Halo Cells(N) that have been assigned + // to a CaloParticle (hence the F, since those should have not been marked as halo) + // 3. hitsToCaloParticleId[hitId] = -1 + // FP: These represent Real Cells(P) that have not been + // assigned to any CaloParticle (hence the F, since these are fakes) + // 4. hitsToCaloParticleId[hitId] >= 0 + // TP There represent Real Cells(P) that have been assigned + // to a CaloParticle (hence the T) + + std::vector hitsToCaloParticleId(numberOfHitsInLC); + //det id of the first hit just to make the lcLayerId variable + //which maps the layers in -z: 0->51 and in +z: 52->103 + const auto firstHitDetId = hits_and_fractions[0].first; + int lcLayerId = recHitTools_->getLayerWithOffset(firstHitDetId) + + layers * ((recHitTools_->zside(firstHitDetId) + 1) >> 1) - 1; - //Loop though the hits of the layer cluster under study - for (unsigned int hitId = 0; hitId < numberOfHitsInLC; hitId++) { - DetId rh_detid = hits_and_fractions[hitId].first; - auto rhFraction = hits_and_fractions[hitId].second; + //Loop though the hits of the layer cluster under study + for (unsigned int hitId = 0; hitId < numberOfHitsInLC; hitId++) { + DetId rh_detid = hits_and_fractions[hitId].first; + auto rhFraction = hits_and_fractions[hitId].second; - //Since the hit is belonging to the layer cluster, it must also be in the rechits map. - std::map::const_iterator itcheck = hitMap.find(rh_detid); - const HGCRecHit* hit = itcheck->second; + //Since the hit is belonging to the layer cluster, it must also be in the rechits map. 
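// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the lcLayerId expression above
// folds the two endcaps into one index range. Assuming, as in RecHitTools,
// that getLayerWithOffset() counts layers from 1 and zside() returns -1 or +1,
// the mapping is -z -> 0..layers-1 and +z -> layers..2*layers-1 (0..51 and
// 52..103 for layers == 52). The function name is hypothetical.
inline int foldedLayerIdSketch(int layerWithOffset, int zside, unsigned layers) {
  // ((zside + 1) >> 1) is 0 for the -z endcap and 1 for the +z endcap
  return layerWithOffset + static_cast<int>(layers) * ((zside + 1) >> 1) - 1;
}
// ---------------------------------------------------------------------------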
+ std::map::const_iterator itcheck = hitMap.find(rh_detid); + const HGCRecHit* hit = itcheck->second; - //Make a map that will connect a detid (that belongs to a rechit of the layer cluster under study, - //no need to save others) with: - //1. the layer clusters that have rechits in that detid - //2. the fraction of the rechit of each layer cluster that contributes to that detid. - //So, something like: - //detid: (layer cluster 1, hit fraction) , (layer cluster 2, hit fraction), (layer cluster 3, hit fraction) ... - //here comparing with the calo particle map above the - auto hit_find_in_LC = detIdToMultiClusterId_Map.find(rh_detid); - if (hit_find_in_LC == detIdToMultiClusterId_Map.end()) { - detIdToMultiClusterId_Map[rh_detid] = std::vector(); - } - detIdToMultiClusterId_Map[rh_detid].emplace_back( - HGVHistoProducerAlgo::detIdInfoInMultiCluster{mclId, mclId, rhFraction}); + //Make a map that will connect a detid (that belongs to a rechit of the layer cluster under study, + //no need to save others) with: + //1. the layer clusters that have rechits in that detid + //2. the fraction of the rechit of each layer cluster that contributes to that detid. + //So, something like: + //detid: (layer cluster 1, hit fraction) , (layer cluster 2, hit fraction), (layer cluster 3, hit fraction) ... + //here comparing with the calo particle map above the + auto hit_find_in_LC = detIdToMultiClusterId_Map.find(rh_detid); + if (hit_find_in_LC == detIdToMultiClusterId_Map.end()) { + detIdToMultiClusterId_Map[rh_detid] = std::vector(); + } + detIdToMultiClusterId_Map[rh_detid].emplace_back( + HGVHistoProducerAlgo::detIdInfoInMultiCluster{mclId, mclId, rhFraction}); - //Check whether the rechit of the layer cluster under study has a sim hit in the same cell. - auto hit_find_in_CP = detIdToCaloParticleId_Map.find(rh_detid); + //Check whether the rechit of the layer cluster under study has a sim hit in the same cell. + auto hit_find_in_CP = detIdToCaloParticleId_Map.find(rh_detid); - // if the fraction is zero or the hit does not belong to any calo - // particle, set the caloparticleId for the hit to -1 this will - // contribute to the number of noise hits + // if the fraction is zero or the hit does not belong to any calo + // particle, set the caloparticleId for the hit to -1 this will + // contribute to the number of noise hits - // MR Remove the case in which the fraction is 0, since this could be a - // real hit that has been marked as halo. - if (rhFraction == 0.) { - hitsToCaloParticleId[hitId] = -2; - numberOfHaloHitsInMCL++; - } - if (hit_find_in_CP == detIdToCaloParticleId_Map.end()) { - hitsToCaloParticleId[hitId] -= 1; - } else { - auto maxCPEnergyInLC = 0.f; - auto maxCPId = -1; - for (auto& h : hit_find_in_CP->second) { - auto shared_fraction = std::min(rhFraction, h.fraction); - //We are in the case where there are calo particles with simhits connected via detid with the rechit under study - //So, from all layers clusters, find the rechits that are connected with a calo particle and save/calculate the - //energy of that calo particle as the sum over all rechits of the rechits energy weighted - //by the caloparticle's fraction related to that rechit. - CPEnergyInMCL[h.clusterId] += shared_fraction * hit->energy(); - //Same but for layer clusters for the cell association per layer - CPEnergyInLC[h.clusterId] += shared_fraction * hit->energy(); - //Here cPOnLayer[caloparticle][layer] describe above is set. 
- //Here for multi clusters with matched rechit the CP fraction times hit energy is added and saved . - cPOnLayer[h.clusterId][lcLayerId].layerClusterIdToEnergyAndScore[mclId].first += - shared_fraction * hit->energy(); - cPOnLayer[h.clusterId][lcLayerId].layerClusterIdToEnergyAndScore[mclId].second = FLT_MAX; - //cpsInMultiCluster[multicluster][CPids] - //Connects a multi cluster with all related caloparticles. - cpsInMultiCluster[mclId].emplace_back(std::make_pair(h.clusterId, FLT_MAX)); - //From all CaloParticles related to a layer cluster, he saves id and energy of the calo particle - //that after simhit-rechit matching in layer has the maximum energy. - if (shared_fraction > maxCPEnergyInLC) { - //energy is used only here. cpid is saved for multiclusters - maxCPEnergyInLC = CPEnergyInLC[h.clusterId]; - maxCPId = h.clusterId; + // MR Remove the case in which the fraction is 0, since this could be a + // real hit that has been marked as halo. + if (rhFraction == 0.) { + hitsToCaloParticleId[hitId] = -2; + numberOfHaloHitsInMCL++; + } + if (hit_find_in_CP == detIdToCaloParticleId_Map.end()) { + hitsToCaloParticleId[hitId] -= 1; + } else { + auto maxCPEnergyInLC = 0.f; + auto maxCPId = -1; + for (auto& h : hit_find_in_CP->second) { + auto shared_fraction = std::min(rhFraction, h.fraction); + //We are in the case where there are calo particles with simhits connected via detid with the rechit under study + //So, from all layers clusters, find the rechits that are connected with a calo particle and save/calculate the + //energy of that calo particle as the sum over all rechits of the rechits energy weighted + //by the caloparticle's fraction related to that rechit. + CPEnergyInMCL[h.clusterId] += shared_fraction * hit->energy(); + //Same but for layer clusters for the cell association per layer + CPEnergyInLC[h.clusterId] += shared_fraction * hit->energy(); + //Here cPOnLayer[caloparticle][layer] describe above is set. + //Here for multi clusters with matched rechit the CP fraction times hit energy is added and saved . + cPOnLayer[h.clusterId][lcLayerId].layerClusterIdToEnergyAndScore[mclId].first += + shared_fraction * hit->energy(); + cPOnLayer[h.clusterId][lcLayerId].layerClusterIdToEnergyAndScore[mclId].second = FLT_MAX; + //cpsInMultiCluster[multicluster][CPids] + //Connects a multi cluster with all related caloparticles. + cpsInMultiCluster[mclId].emplace_back(h.clusterId, FLT_MAX); + //From all CaloParticles related to a layer cluster, he saves id and energy of the calo particle + //that after simhit-rechit matching in layer has the maximum energy. + if (shared_fraction > maxCPEnergyInLC) { + //energy is used only here. cpid is saved for multiclusters + maxCPEnergyInLC = CPEnergyInLC[h.clusterId]; + maxCPId = h.clusterId; + } } + //Keep in mind here maxCPId could be zero. So, below ask for negative not including zero to count noise. + hitsToCaloParticleId[hitId] = maxCPId; } - //Keep in mind here maxCPId could be zero. So, below ask for negative not including zero to count noise. - hitsToCaloParticleId[hitId] = maxCPId; - } - } //end of loop through rechits of the layer cluster. + } //end of loop through rechits of the layer cluster. - //Loop through all rechits to count how many of them are noise and how many are matched. - //In case of matched rechit-simhit, he counts and saves the number of rechits related to the maximum energy CaloParticle. 
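// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the rechit loop above credits
// each CaloParticle that left a simhit in the cell with
//   min(rechit fraction, simhit fraction) * rechit energy,
// summed separately per multicluster (CPEnergyInMCL) and per layer cluster
// (CPEnergyInLC). A minimal standalone sketch with hypothetical plain
// containers in place of the detIdToCaloParticleId_Map entries:
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <vector>

void accumulateSharedEnergySketch(float rhFraction,
                                  float hitEnergy,
                                  const std::vector<std::pair<int, float>>& cpIdAndSimFraction,
                                  std::unordered_map<int, float>& cpSharedEnergy) {
  for (const auto& cp : cpIdAndSimFraction) {
    const float shared = std::min(rhFraction, cp.second);  // fraction shared by reco and sim
    cpSharedEnergy[cp.first] += shared * hitEnergy;
  }
}
// ---------------------------------------------------------------------------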
- for (auto& c : hitsToCaloParticleId) { - if (c < 0) { - numberOfNoiseHitsInMCL++; - } else { - occurrencesCPinMCL[c]++; + //Loop through all rechits to count how many of them are noise and how many are matched. + //In case of matched rechit-simhit, he counts and saves the number of rechits related to the maximum energy CaloParticle. + for (auto c : hitsToCaloParticleId) { + if (c < 0) { + numberOfNoiseHitsInMCL++; + } else { + occurrencesCPinMCL[c]++; + } } - } - //Below from all maximum energy CaloParticles, he saves the one with the largest amount - //of related rechits. - for (auto& c : occurrencesCPinMCL) { - if (c.second > maxCPNumberOfHitsInMCL) { - maxCPId_byNumberOfHits = c.first; - maxCPNumberOfHitsInMCL = c.second; + //Below from all maximum energy CaloParticles, he saves the one with the largest amount + //of related rechits. + for (auto& c : occurrencesCPinMCL) { + if (c.second > maxCPNumberOfHitsInMCL) { + maxCPId_byNumberOfHits = c.first; + maxCPNumberOfHitsInMCL = c.second; + } } - } - //Find the CaloParticle that has the maximum energy shared with the multicluster under study. - for (auto& c : CPEnergyInMCL) { - if (c.second > maxEnergySharedMCLandCP) { - maxCPId_byEnergy = c.first; - maxEnergySharedMCLandCP = c.second; - } - } - //The energy of the CaloParticle that found to have the maximum energy shared with the multicluster under study. - float totalCPEnergyFromLayerCP = 0.f; - if (maxCPId_byEnergy >= 0) { - //Loop through all layers - for (unsigned int j = 0; j < layers * 2; ++j) { - totalCPEnergyFromLayerCP = totalCPEnergyFromLayerCP + cPOnLayer[maxCPId_byEnergy][j].energy; + //Find the CaloParticle that has the maximum energy shared with the multicluster under study. + for (auto& c : CPEnergyInMCL) { + if (c.second > maxEnergySharedMCLandCP) { + maxCPId_byEnergy = c.first; + maxEnergySharedMCLandCP = c.second; + } } - energyFractionOfCPinMCL = maxEnergySharedMCLandCP / totalCPEnergyFromLayerCP; - if (multiClusters[mclId].energy() > 0.f) { - energyFractionOfMCLinCP = maxEnergySharedMCLandCP / multiClusters[mclId].energy(); + //The energy of the CaloParticle that found to have the maximum energy shared with the multicluster under study. 
+ float totalCPEnergyFromLayerCP = 0.f; + if (maxCPId_byEnergy >= 0) { + //Loop through all layers + for (unsigned int j = 0; j < layers * 2; ++j) { + totalCPEnergyFromLayerCP = totalCPEnergyFromLayerCP + cPOnLayer[maxCPId_byEnergy][j].energy; + } + energyFractionOfCPinMCL = maxEnergySharedMCLandCP / totalCPEnergyFromLayerCP; + if (multiClusters[mclId].energy() > 0.f) { + energyFractionOfMCLinCP = maxEnergySharedMCLandCP / multiClusters[mclId].energy(); + } } - } - LogDebug("HGCalValidator") << std::setw(12) << "multiCluster" - << "\t" //LogDebug("HGCalValidator") - << std::setw(10) << "mulcl energy" - << "\t" << std::setw(5) << "nhits" - << "\t" << std::setw(12) << "noise hits" - << "\t" << std::setw(22) << "maxCPId_byNumberOfHits" - << "\t" << std::setw(8) << "nhitsCP" - << "\t" << std::setw(16) << "maxCPId_byEnergy" - << "\t" << std::setw(23) << "maxEnergySharedMCLandCP" - << "\t" << std::setw(22) << "totalCPEnergyFromAllLayerCP" - << "\t" << std::setw(22) << "energyFractionOfMCLinCP" - << "\t" << std::setw(25) << "energyFractionOfCPinMCL" - << "\t" << std::endl; - LogDebug("HGCalValidator") << std::setw(12) << mclId << "\t" //LogDebug("HGCalValidator") - << std::setw(10) << multiClusters[mclId].energy() << "\t" << std::setw(5) - << numberOfHitsInMCL << "\t" << std::setw(12) << numberOfNoiseHitsInMCL << "\t" - << std::setw(22) << maxCPId_byNumberOfHits << "\t" << std::setw(8) - << maxCPNumberOfHitsInMCL << "\t" << std::setw(16) << maxCPId_byEnergy << "\t" - << std::setw(23) << maxEnergySharedMCLandCP << "\t" << std::setw(22) - << totalCPEnergyFromLayerCP << "\t" << std::setw(22) << energyFractionOfMCLinCP << "\t" - << std::setw(25) << energyFractionOfCPinMCL << std::endl; - - } //end of loop through multi clusters + LogDebug("HGCalValidator") << std::setw(12) << "multiCluster" + << "\t" //LogDebug("HGCalValidator") + << std::setw(10) << "mulcl energy" + << "\t" << std::setw(5) << "nhits" + << "\t" << std::setw(12) << "noise hits" + << "\t" << std::setw(22) << "maxCPId_byNumberOfHits" + << "\t" << std::setw(8) << "nhitsCP" + << "\t" << std::setw(16) << "maxCPId_byEnergy" + << "\t" << std::setw(23) << "maxEnergySharedMCLandCP" + << "\t" << std::setw(22) << "totalCPEnergyFromAllLayerCP" + << "\t" << std::setw(22) << "energyFractionOfMCLinCP" + << "\t" << std::setw(25) << "energyFractionOfCPinMCL" + << "\t" << std::endl; + LogDebug("HGCalValidator") << std::setw(12) << mclId << "\t" //LogDebug("HGCalValidator") + << std::setw(10) << multiClusters[mclId].energy() << "\t" << std::setw(5) + << numberOfHitsInMCL << "\t" << std::setw(12) << numberOfNoiseHitsInMCL << "\t" + << std::setw(22) << maxCPId_byNumberOfHits << "\t" << std::setw(8) + << maxCPNumberOfHitsInMCL << "\t" << std::setw(16) << maxCPId_byEnergy << "\t" + << std::setw(23) << maxEnergySharedMCLandCP << "\t" << std::setw(22) + << totalCPEnergyFromLayerCP << "\t" << std::setw(22) << energyFractionOfMCLinCP << "\t" + << std::setw(25) << energyFractionOfCPinMCL << std::endl; + + } //end of loop through multi clusters + } //Loop through multiclusters for (unsigned int mclId = 0; mclId < nMultiClusters; ++mclId) { const auto& hits_and_fractions = multiClusters[mclId].hitsAndFractions(); - // find the unique caloparticles id contributing to the multi clusters - //cpsInMultiCluster[multicluster][CPids] - std::sort(cpsInMultiCluster[mclId].begin(), cpsInMultiCluster[mclId].end()); - auto last = std::unique(cpsInMultiCluster[mclId].begin(), cpsInMultiCluster[mclId].end()); - cpsInMultiCluster[mclId].erase(last, 
cpsInMultiCluster[mclId].end()); - - if (multiClusters[mclId].energy() == 0. && !cpsInMultiCluster[mclId].empty()) { - //Loop through all CaloParticles contributing to multicluster mclId. - for (auto& cpPair : cpsInMultiCluster[mclId]) { - //In case of a multi cluster with zero energy but related CaloParticles the score is set to 1. - cpPair.second = 1.; - // LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId - // << "\t CP id: \t" << cpPair.first - // << "\t score \t" << cpPair.second - // << "\n"; - LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\t CP id: \t" << cpPair.first << "\t score \t" - << cpPair.second << std::endl; - histograms.h_score_multicl2caloparticle[count]->Fill(cpPair.second); + if (!hits_and_fractions.empty()) { + // find the unique caloparticles id contributing to the multi clusters + //cpsInMultiCluster[multicluster][CPids] + std::sort(cpsInMultiCluster[mclId].begin(), cpsInMultiCluster[mclId].end()); + auto last = std::unique(cpsInMultiCluster[mclId].begin(), cpsInMultiCluster[mclId].end()); + cpsInMultiCluster[mclId].erase(last, cpsInMultiCluster[mclId].end()); + + if (multiClusters[mclId].energy() == 0. && !cpsInMultiCluster[mclId].empty()) { + //Loop through all CaloParticles contributing to multicluster mclId. + for (auto& cpPair : cpsInMultiCluster[mclId]) { + //In case of a multi cluster with zero energy but related CaloParticles the score is set to 1. + cpPair.second = 1.; + LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\t CP id: \t" << cpPair.first + << "\t score \t" << cpPair.second << std::endl; + histograms.h_score_multicl2caloparticle[count]->Fill(cpPair.second); + } + continue; } - continue; - } - // Compute the correct normalization - float invMultiClusterEnergyWeight = 0.f; - for (auto const& haf : multiClusters[mclId].hitsAndFractions()) { - invMultiClusterEnergyWeight += - (haf.second * hitMap.at(haf.first)->energy()) * (haf.second * hitMap.at(haf.first)->energy()); - } - invMultiClusterEnergyWeight = 1.f / invMultiClusterEnergyWeight; + // Compute the correct normalization + float invMultiClusterEnergyWeight = 0.f; + for (auto const& haf : multiClusters[mclId].hitsAndFractions()) { + invMultiClusterEnergyWeight += + (haf.second * hitMap.at(haf.first)->energy()) * (haf.second * hitMap.at(haf.first)->energy()); + } + invMultiClusterEnergyWeight = 1.f / invMultiClusterEnergyWeight; + + unsigned int numberOfHitsInLC = hits_and_fractions.size(); + for (unsigned int i = 0; i < numberOfHitsInLC; ++i) { + DetId rh_detid = hits_and_fractions[i].first; + float rhFraction = hits_and_fractions[i].second; + bool hitWithNoCP = false; + + auto hit_find_in_CP = detIdToCaloParticleId_Map.find(rh_detid); + if (hit_find_in_CP == detIdToCaloParticleId_Map.end()) + hitWithNoCP = true; + auto itcheck = hitMap.find(rh_detid); + const HGCRecHit* hit = itcheck->second; + float hitEnergyWeight = hit->energy() * hit->energy(); - unsigned int numberOfHitsInLC = hits_and_fractions.size(); - for (unsigned int i = 0; i < numberOfHitsInLC; ++i) { - DetId rh_detid = hits_and_fractions[i].first; - float rhFraction = hits_and_fractions[i].second; - bool hitWithNoCP = false; + for (auto& cpPair : cpsInMultiCluster[mclId]) { + float cpFraction = 0.f; + if (!hitWithNoCP) { + auto findHitIt = std::find(detIdToCaloParticleId_Map[rh_detid].begin(), + detIdToCaloParticleId_Map[rh_detid].end(), + HGVHistoProducerAlgo::detIdInfoInCluster{cpPair.first, 0.f}); + if (findHitIt != detIdToCaloParticleId_Map[rh_detid].end()) { + cpFraction = 
findHitIt->fraction; + } + } + if (cpPair.second == FLT_MAX) { + cpPair.second = 0.f; + } + cpPair.second += + (rhFraction - cpFraction) * (rhFraction - cpFraction) * hitEnergyWeight * invMultiClusterEnergyWeight; + } + } //end of loop through rechits of layer cluster - auto hit_find_in_CP = detIdToCaloParticleId_Map.find(rh_detid); - if (hit_find_in_CP == detIdToCaloParticleId_Map.end()) - hitWithNoCP = true; - auto itcheck = hitMap.find(rh_detid); - const HGCRecHit* hit = itcheck->second; - float hitEnergyWeight = hit->energy() * hit->energy(); + //In case of a multi cluster with some energy but none related CaloParticles print some info. + if (cpsInMultiCluster[mclId].empty()) + LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\tCP id:\t-1 " + << "\t score \t-1" + << "\n"; + auto score = std::min_element(std::begin(cpsInMultiCluster[mclId]), + std::end(cpsInMultiCluster[mclId]), + [](const auto& obj1, const auto& obj2) { return obj1.second < obj2.second; }); for (auto& cpPair : cpsInMultiCluster[mclId]) { - float cpFraction = 0.f; - if (!hitWithNoCP) { - auto findHitIt = std::find(detIdToCaloParticleId_Map[rh_detid].begin(), - detIdToCaloParticleId_Map[rh_detid].end(), - HGVHistoProducerAlgo::detIdInfoInCluster{cpPair.first, 0.f}); - if (findHitIt != detIdToCaloParticleId_Map[rh_detid].end()) { - cpFraction = findHitIt->fraction; - } + LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\t CP id: \t" << cpPair.first << "\t score \t" + << cpPair.second << std::endl; + if (cpPair.first == score->first) { + histograms.h_score_multicl2caloparticle[count]->Fill(score->second); } - if (cpPair.second == FLT_MAX) { - cpPair.second = 0.f; + float sharedeneCPallLayers = 0.; + //Loop through all layers + for (unsigned int j = 0; j < layers * 2; ++j) { + auto const& cp_linked = cPOnLayer[cpPair.first][j].layerClusterIdToEnergyAndScore[mclId]; + sharedeneCPallLayers += cp_linked.first; + } //end of loop through layers + LogDebug("HGCalValidator") << "sharedeneCPallLayers " << sharedeneCPallLayers << std::endl; + if (cpPair.first == score->first) { + histograms.h_sharedenergy_multicl2caloparticle[count]->Fill(sharedeneCPallLayers / + multiClusters[mclId].energy()); + histograms.h_energy_vs_score_multicl2caloparticle[count]->Fill( + score->second, sharedeneCPallLayers / multiClusters[mclId].energy()); } - cpPair.second += - (rhFraction - cpFraction) * (rhFraction - cpFraction) * hitEnergyWeight * invMultiClusterEnergyWeight; } - } //end of loop through rechits of layer cluster - - //In case of a multi cluster with some energy but none related CaloParticles print some info. 
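// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the loop above builds, for every
// (multicluster, CaloParticle) pair, the score
//   score = sum_h (f_reco,h - f_sim,h)^2 * E_h^2 / sum_h (f_reco,h * E_h)^2
// where f_reco,h is the multicluster's fraction of cell h, f_sim,h the
// CaloParticle's fraction (0 when there is no matched simhit) and E_h the
// rechit energy. A standalone sketch with a hypothetical HitShareSketch struct:
#include <vector>

struct HitShareSketch {
  float recoFraction;  // fraction of the cell carried by the multicluster
  float simFraction;   // fraction of the cell carried by the CaloParticle, 0 if unmatched
  float energy;        // rechit energy
};

float mclToCpScoreSketch(const std::vector<HitShareSketch>& hits) {
  float norm = 0.f;
  for (const auto& h : hits)
    norm += (h.recoFraction * h.energy) * (h.recoFraction * h.energy);
  if (norm == 0.f)
    return 1.f;  // analogous to the score of 1 assigned above to zero-energy multiclusters
  float score = 0.f;
  for (const auto& h : hits) {
    const float d = h.recoFraction - h.simFraction;
    score += d * d * h.energy * h.energy / norm;
  }
  return score;
}
// ---------------------------------------------------------------------------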
- if (cpsInMultiCluster[mclId].empty()) - LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\tCP id:\t-1 " - << "\t score \t-1" - << "\n"; - auto score = std::min_element(std::begin(cpsInMultiCluster[mclId]), - std::end(cpsInMultiCluster[mclId]), - [](const auto& obj1, const auto& obj2) { return obj1.second < obj2.second; }); - for (auto& cpPair : cpsInMultiCluster[mclId]) { - // LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId - // << "\t CP id: \t" << cpPair.first - // << "\t score \t" << cpPair.second - // << "\n"; - LogDebug("HGCalValidator") << "multiCluster Id: \t" << mclId << "\t CP id: \t" << cpPair.first << "\t score \t" - << cpPair.second << std::endl; - if (cpPair.first == score->first) { - histograms.h_score_multicl2caloparticle[count]->Fill(score->second); - } - float sharedeneCPallLayers = 0.; - //Loop through all layers - for (unsigned int j = 0; j < layers * 2; ++j) { - auto const& cp_linked = cPOnLayer[cpPair.first][j].layerClusterIdToEnergyAndScore[mclId]; - sharedeneCPallLayers += cp_linked.first; - } //end of loop through layers - LogDebug("HGCalValidator") << "sharedeneCPallLayers " << sharedeneCPallLayers << std::endl; - if (cpPair.first == score->first) { - histograms.h_sharedenergy_multicl2caloparticle[count]->Fill(sharedeneCPallLayers / - multiClusters[mclId].energy()); - histograms.h_energy_vs_score_multicl2caloparticle[count]->Fill( - score->second, sharedeneCPallLayers / multiClusters[mclId].energy()); - } + auto assocFakeMerge = std::count_if(std::begin(cpsInMultiCluster[mclId]), + std::end(cpsInMultiCluster[mclId]), + [](const auto& obj) { return obj.second < ScoreCutMCLtoCPFakeMerge_; }); + tracksters_fakemerge[mclId] = assocFakeMerge; } - - auto assocFakeMerge = std::count_if(std::begin(cpsInMultiCluster[mclId]), - std::end(cpsInMultiCluster[mclId]), - [](const auto& obj) { return obj.second < ScoreCutMCLtoCPFakeMerge_; }); - tracksters_fakemerge[mclId] = assocFakeMerge; - } //end of loop through multiclusters std::unordered_map> score3d; @@ -2193,37 +2188,40 @@ void HGVHistoProducerAlgo::multiClusters_to_CaloParticles(const Histograms& hist // reco-level, namely fake-rate an merge-rate. In this loop we should *not* // restrict only to the selected caloParaticles. 
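// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the reco-level loop that follows
// fills the fake- and merge-rate histograms from the number of CaloParticles
// whose score passes ScoreCutMCLtoCPFakeMerge_ (the numerator histograms are
// filled when at least one CaloParticle is associated, the merge histograms
// when two or more are). A sketch of that counting with hypothetical names:
#include <algorithm>
#include <utility>
#include <vector>

int countAssociatedCPsSketch(const std::vector<std::pair<int, float>>& cpIdAndScore, float scoreCut) {
  return static_cast<int>(std::count_if(
      cpIdAndScore.begin(), cpIdAndScore.end(), [scoreCut](const auto& p) { return p.second < scoreCut; }));
}
// ---------------------------------------------------------------------------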
for (unsigned int mclId = 0; mclId < nMultiClusters; ++mclId) { - auto assocFakeMerge = tracksters_fakemerge[mclId]; - auto assocDuplicate = tracksters_duplicate[mclId]; - if (assocDuplicate) { - histograms.h_numDup_multicl_eta[count]->Fill(multiClusters[mclId].eta()); - histograms.h_numDup_multicl_phi[count]->Fill(multiClusters[mclId].phi()); - } - if (assocFakeMerge > 0) { - histograms.h_num_multicl_eta[count]->Fill(multiClusters[mclId].eta()); - histograms.h_num_multicl_phi[count]->Fill(multiClusters[mclId].phi()); - auto best = std::min_element(std::begin(cpsInMultiCluster[mclId]), - std::end(cpsInMultiCluster[mclId]), - [](const auto& obj1, const auto& obj2) { return obj1.second < obj2.second; }); - - //This is the shared energy taking the best caloparticle in each layer - float sharedeneCPallLayers = 0.; - //Loop through all layers - for (unsigned int j = 0; j < layers * 2; ++j) { - auto const& best_cp_linked = cPOnLayer[best->first][j].layerClusterIdToEnergyAndScore[mclId]; - sharedeneCPallLayers += best_cp_linked.first; - } //end of loop through layers - histograms.h_sharedenergy_multicl2caloparticle_vs_eta[count]->Fill( - multiClusters[mclId].eta(), sharedeneCPallLayers / multiClusters[mclId].energy()); - histograms.h_sharedenergy_multicl2caloparticle_vs_phi[count]->Fill( - multiClusters[mclId].phi(), sharedeneCPallLayers / multiClusters[mclId].energy()); - } - if (assocFakeMerge >= 2) { - histograms.h_numMerge_multicl_eta[count]->Fill(multiClusters[mclId].eta()); - histograms.h_numMerge_multicl_phi[count]->Fill(multiClusters[mclId].phi()); + const auto& hits_and_fractions = multiClusters[mclId].hitsAndFractions(); + if (!hits_and_fractions.empty()) { + auto assocFakeMerge = tracksters_fakemerge[mclId]; + auto assocDuplicate = tracksters_duplicate[mclId]; + if (assocDuplicate) { + histograms.h_numDup_multicl_eta[count]->Fill(multiClusters[mclId].eta()); + histograms.h_numDup_multicl_phi[count]->Fill(multiClusters[mclId].phi()); + } + if (assocFakeMerge > 0) { + histograms.h_num_multicl_eta[count]->Fill(multiClusters[mclId].eta()); + histograms.h_num_multicl_phi[count]->Fill(multiClusters[mclId].phi()); + auto best = std::min_element(std::begin(cpsInMultiCluster[mclId]), + std::end(cpsInMultiCluster[mclId]), + [](const auto& obj1, const auto& obj2) { return obj1.second < obj2.second; }); + + //This is the shared energy taking the best caloparticle in each layer + float sharedeneCPallLayers = 0.; + //Loop through all layers + for (unsigned int j = 0; j < layers * 2; ++j) { + auto const& best_cp_linked = cPOnLayer[best->first][j].layerClusterIdToEnergyAndScore[mclId]; + sharedeneCPallLayers += best_cp_linked.first; + } //end of loop through layers + histograms.h_sharedenergy_multicl2caloparticle_vs_eta[count]->Fill( + multiClusters[mclId].eta(), sharedeneCPallLayers / multiClusters[mclId].energy()); + histograms.h_sharedenergy_multicl2caloparticle_vs_phi[count]->Fill( + multiClusters[mclId].phi(), sharedeneCPallLayers / multiClusters[mclId].energy()); + } + if (assocFakeMerge >= 2) { + histograms.h_numMerge_multicl_eta[count]->Fill(multiClusters[mclId].eta()); + histograms.h_numMerge_multicl_phi[count]->Fill(multiClusters[mclId].phi()); + } + histograms.h_denom_multicl_eta[count]->Fill(multiClusters[mclId].eta()); + histograms.h_denom_multicl_phi[count]->Fill(multiClusters[mclId].phi()); } - histograms.h_denom_multicl_eta[count]->Fill(multiClusters[mclId].eta()); - histograms.h_denom_multicl_phi[count]->Fill(multiClusters[mclId].phi()); } } @@ -2266,6 +2264,10 @@ void 
HGVHistoProducerAlgo::fill_multi_cluster_histos(const Histograms& histogram for (unsigned int mclId = 0; mclId < nMultiClusters; ++mclId) { const auto layerClusters = multiClusters[mclId].clusters(); auto nLayerClusters = layerClusters.size(); + + if (nLayerClusters == 0) + continue; + if (multiClusters[mclId].z() < 0.) { tnmclmz++; } @@ -2330,18 +2332,20 @@ void HGVHistoProducerAlgo::fill_multi_cluster_histos(const Histograms& histogram //Since we want to also check for non contiguous multiclusters bool contimulti = false; //Observe that we start from 1 and go up to size - 1 element. - for (unsigned int i = 1; i < multicluster_layers_vec.size() - 1; ++i) { - if ((multicluster_layers_vec[i - 1] + 1 == multicluster_layers_vec[i]) && - (multicluster_layers_vec[i + 1] - 1 == multicluster_layers_vec[i])) { - //So, this is a multicluster with 3 contiguous layers per event - if (multiclusterInZplus) { - tncontmclpz++; - } - if (multiclusterInZminus) { - tncontmclmz++; + if (multicluster_layers_vec.size() >= 3) { + for (unsigned int i = 1; i < multicluster_layers_vec.size() - 1; ++i) { + if ((multicluster_layers_vec[i - 1] + 1 == multicluster_layers_vec[i]) && + (multicluster_layers_vec[i + 1] - 1 == multicluster_layers_vec[i])) { + //So, this is a multicluster with 3 contiguous layers per event + if (multiclusterInZplus) { + tncontmclpz++; + } + if (multiclusterInZminus) { + tncontmclmz++; + } + contimulti = true; + break; } - contimulti = true; - break; } } //Count non contiguous multiclusters @@ -2382,17 +2386,18 @@ void HGVHistoProducerAlgo::fill_multi_cluster_histos(const Histograms& histogram histograms.h_multiplicityOfLCinMCL_vs_layerclusterenergy[count]->Fill(mlp, layerClusters[lc]->energy()); } - histograms.h_multicluster_pt[count]->Fill(multiClusters[mclId].pt()); - histograms.h_multicluster_eta[count]->Fill(multiClusters[mclId].eta()); - histograms.h_multicluster_phi[count]->Fill(multiClusters[mclId].phi()); - histograms.h_multicluster_energy[count]->Fill(multiClusters[mclId].energy()); - histograms.h_multicluster_x[count]->Fill(multiClusters[mclId].x()); - histograms.h_multicluster_y[count]->Fill(multiClusters[mclId].y()); - histograms.h_multicluster_z[count]->Fill(multiClusters[mclId].z()); - histograms.h_multicluster_firstlayer[count]->Fill((float)*multicluster_layers.begin()); - histograms.h_multicluster_lastlayer[count]->Fill((float)*multicluster_layers.rbegin()); - histograms.h_multicluster_layersnum[count]->Fill((float)multicluster_layers.size()); - + if (!multicluster_layers.empty()) { + histograms.h_multicluster_pt[count]->Fill(multiClusters[mclId].pt()); + histograms.h_multicluster_eta[count]->Fill(multiClusters[mclId].eta()); + histograms.h_multicluster_phi[count]->Fill(multiClusters[mclId].phi()); + histograms.h_multicluster_energy[count]->Fill(multiClusters[mclId].energy()); + histograms.h_multicluster_x[count]->Fill(multiClusters[mclId].x()); + histograms.h_multicluster_y[count]->Fill(multiClusters[mclId].y()); + histograms.h_multicluster_z[count]->Fill(multiClusters[mclId].z()); + histograms.h_multicluster_firstlayer[count]->Fill((float)*multicluster_layers.begin()); + histograms.h_multicluster_lastlayer[count]->Fill((float)*multicluster_layers.rbegin()); + histograms.h_multicluster_layersnum[count]->Fill((float)multicluster_layers.size()); + } } //end of loop through multiclusters histograms.h_multiclusternum[count]->Fill(tnmclmz + tnmclpz);
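// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): the guard added above only runs
// the contiguity scan when the multicluster spans at least three layers,
// sidestepping the unsigned wrap-around of size() - 1 for an empty vector. A
// standalone sketch of the check, assuming a sorted vector of folded layer ids
// and a hypothetical function name:
#include <cstddef>
#include <vector>

bool hasThreeContiguousLayersSketch(const std::vector<int>& layerIds) {
  if (layerIds.size() < 3)
    return false;
  for (std::size_t i = 1; i + 1 < layerIds.size(); ++i) {
    // three consecutive layer ids, e.g. 7, 8, 9
    if (layerIds[i - 1] + 1 == layerIds[i] && layerIds[i] + 1 == layerIds[i + 1])
      return true;
  }
  return false;
}
// ---------------------------------------------------------------------------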