Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
27ee522
Implement missing updateCRU function call
wiechula Dec 17, 2019
3a58137
use batch mode for DPL
wiechula Jan 16, 2020
ab06be3
add missing pragma
wiechula Jan 16, 2020
20fbf35
add missing pragma
wiechula Jan 16, 2020
9c44769
allow users to set a callback (move to public)
wiechula Jan 16, 2020
7b2d703
Different options for dumping data to file
wiechula Jan 16, 2020
b2466a9
First running version of pedestal calibration in DPL
wiechula Jan 16, 2020
2f9c26b
Add Configurable Param for DigitDump class
wiechula Jan 29, 2020
fd88eaa
Implement raw to digits in DPL
wiechula Jan 20, 2020
13457ec
Fix and extend raw to digits workflow
wiechula Feb 21, 2020
f38e976
Dump laser tracks to tree, get span of tracks from a bundle
wiechula Feb 10, 2020
c0671c2
Link based zero suppression format
wiechula Feb 20, 2020
4f475c4
Format extension and improvement
wiechula Feb 28, 2020
cc46de6
Add option to use Link base ZS in raw reco workflow
wiechula Feb 26, 2020
62f7ed3
Add generic fit on histogrammed array
wiechula Mar 4, 2020
eba28c8
add more robust fit procedure
wiechula Mar 4, 2020
f01b883
add option to store debug histograms
wiechula Mar 11, 2020
709b37c
fix math base
wiechula Mar 11, 2020
a347102
Add functionality for summary report creation
wiechula Mar 11, 2020
c05d642
add summary report option
wiechula Mar 11, 2020
19206a8
Add common utilities for drawing, saving, tokenizing, ...
wiechula Mar 16, 2020
862d126
Adapt with new common utils
wiechula Mar 16, 2020
92a1a11
macro for pedestal and threshold files for CRU configuration
wiechula Mar 29, 2020
1e79b59
Cleanup macro for CRU configuration
wiechula Mar 29, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 86 additions & 1 deletion Common/MathUtils/include/MathUtils/MathBase.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,100 @@
#include "TLinearFitter.h"
#include "TVectorD.h"
#include "TMath.h"
#include "TF1.h"
#include "Foption.h"
#include "HFitInterface.h"
#include "TFitResultPtr.h"
#include "TFitResult.h"
#include "Fit/Fitter.h"
#include "Fit/BinData.h"
#include "Math/WrappedMultiTF1.h"

#include <fairlogger/Logger.h>
#include "Framework/Logger.h"

namespace o2
{
namespace math_utils
{
namespace math_base
{
/// fit 1D array of histogrammed data with generic root function
///
/// The code was extracted out of ROOT to be able to do fitting on an array with histogrammed data
/// instead of root histograms.
/// It is a stripped down version, so does not provide the same functionality.
/// To be used with care.
///
/// \param[in] nBins size of the array and number of histogram bins
/// \param[in] arr array with elements
/// \param[in] xMin minimum range of the array
/// \param[in] xMax maximum range of the array
/// \param[in] func fit function
/// \param[in] option ROOT fit option string (subset of TH1::Fit options)
/// \return TFitResultPtr holding the fit result
template <typename T>
TFitResultPtr fit(const size_t nBins, const T* arr, const T xMin, const T xMax, TF1& func, std::string_view option = "")
{
  Foption_t fitOption;
  ROOT::Fit::FitOptionsMake(ROOT::Fit::kHistogram, option.data(), fitOption);

  ROOT::Fit::DataRange range(xMin, xMax);
  ROOT::Fit::DataOptions opt;
  ROOT::Fit::BinData fitdata(opt, range);
  fitdata.Initialize(nBins, 1);

  // create an empty TFitResult and build the fitter around it,
  // so the result can be handed back to the caller via TFitResultPtr
  std::shared_ptr<TFitResult> tfr(new TFitResult());
  ROOT::Fit::Fitter fitter(tfr);

  const double binWidth = double(xMax - xMin) / double(nBins);

  // fill the fit data; x is the bin center, the error sqrt(y) assumes
  // Poisson (counting) statistics of the bin content
  for (size_t ibin = 0; ibin < nBins; ++ibin) {
    const double x = double(xMin) + (ibin + 0.5) * binWidth;
    const double y = double(arr[ibin]);
    const double ey = std::sqrt(y);
    fitdata.Add(x, y, ey);
  }

  // 'special' function numbers as used internally by TH1::Fit
  // (100 = gaus, 200 = expo, 400 = landau, 299+npar = polN)
  const int special = func.GetNumber();
  const int npar = func.GetNpar();
  bool linear = func.IsLinear();
  if (special == 299 + npar) {
    linear = kTRUE; // for polynomial functions
  }
  // do not use linear fitter in these cases
  if (fitOption.Bound || fitOption.Like || fitOption.Errors || fitOption.Gradient || fitOption.More || fitOption.User || fitOption.Integral || fitOption.Minuit) {
    linear = kFALSE;
  }

  // compute reasonable start parameters for the known 'special' functions
  if (special != 0 && !fitOption.Bound && !linear) {
    if (special == 100) {
      ROOT::Fit::InitGaus(fitdata, &func); // gaussian
    } else if (special == 400) {
      ROOT::Fit::InitGaus(fitdata, &func); // landau (use the same)
    } else if (special == 200) {
      ROOT::Fit::InitExpo(fitdata, &func); // exponential
    }
  }

  if ((linear || fitOption.Gradient)) {
    fitter.SetFunction(ROOT::Math::WrappedMultiTF1(func));
  } else {
    // cast away the gradient interface to force numerical derivatives
    fitter.SetFunction(static_cast<const ROOT::Math::IParamMultiFunction&>(ROOT::Math::WrappedMultiTF1(func)));
  }

  // standard least square fit
  const bool fitok = fitter.Fit(fitdata, fitOption.ExecPolicy);
  if (!fitok) {
    LOGP(warning, "bad fit");
  }

  return TFitResultPtr(tfr);
}

/// fast fit of an array with ranges (histogram) with gaussian function
///
/// Fitting procedure:
Expand Down
3 changes: 3 additions & 0 deletions Common/MathUtils/src/MathUtilsLinkDef.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@

#pragma link C++ namespace o2::math_utils::math_base;

#pragma link C++ function o2::math_utils::math_base::fit < float>;
#pragma link C++ function o2::math_utils::math_base::fit < double>;

#pragma link C++ function o2::math_utils::math_base::fitGaus < float>;
#pragma link C++ function o2::math_utils::math_base::fitGaus < double>;

Expand Down
3 changes: 2 additions & 1 deletion DataFormats/Detectors/TPC/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ o2_target_root_dictionary(
include/DataFormatsTPC/Defs.h
include/DataFormatsTPC/dEdxInfo.h
include/DataFormatsTPC/CompressedClusters.h
include/DataFormatsTPC/ZeroSuppression.h)
include/DataFormatsTPC/ZeroSuppression.h
include/DataFormatsTPC/ZeroSuppressionLinkBased.h)

o2_add_test(
ClusterNative
Expand Down
5 changes: 3 additions & 2 deletions DataFormats/Detectors/TPC/include/DataFormatsTPC/Defs.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,9 @@ enum class PadSubset : char {

/// Statistics type
enum class StatisticsType {
GausFit, ///< Use Gaus fit for pedestal and noise
MeanStdDev ///< Use mean and standard deviation
GausFit, ///< Use slow gaus fit (better fit stability)
GausFitFast, ///< Use fast gaus fit (less accurate error treatment)
MeanStdDev ///< Use mean and standard deviation
};

// default point definitions for PointND, PointNDlocal, PointNDglobal are in
Expand Down
13 changes: 13 additions & 0 deletions DataFormats/Detectors/TPC/include/DataFormatsTPC/LaserTrack.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#define ALICEO2_TPC_LASERTRACK

#include <string>
#include <gsl/span>

#include "ReconstructionDataFormats/Track.h"

Expand Down Expand Up @@ -80,6 +81,18 @@ class LaserTrackContainer
/// \return array of laser tracks
const auto& getLaserTracks() const { return mLaserTracks; }

/// dump tracks to a tree for simple visualization
static void dumpToTree(const std::string_view fileName);

/// get span with the tracks of one laser bundle
///
/// \param side TPC side index (presumably 0 = A side, 1 = C side — confirm against LaserTrack)
/// \param rod laser rod index on that side
/// \param bundle micro-mirror bundle index within the rod
/// \return non-owning span over the TracksPerBundle consecutive tracks of the bundle
///
/// NOTE(review): indices are not range-checked; out-of-range values index past
/// the end of the track array
gsl::span<const LaserTrack> getTracksInBundle(int side, int rod, int bundle) const
{
  // tracks are stored side-major, then per rod, then per bundle, TracksPerBundle each
  const int startID = LaserTrack::NumberOfTracks / 2 * side +
                      LaserTrack::BundlesPerRod * LaserTrack::TracksPerBundle * rod +
                      LaserTrack::TracksPerBundle * bundle;
  return gsl::span<const LaserTrack>(&mLaserTracks[startID], LaserTrack::TracksPerBundle);
}

private:
std::array<LaserTrack, LaserTrack::NumberOfTracks> mLaserTracks;

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
// Copyright CERN and copyright holders of ALICE O2. This software is
// distributed under the terms of the GNU General Public License v3 (GPL
// Version 3), copied verbatim in the file "COPYING".
//
// See http://alice-o2.web.cern.ch/license for full licensing information.
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file ZeroSuppressionLinkBased.h
/// \brief definitions to deal with the link based zero suppression format
/// \author Jens Wiechula

#ifndef ALICEO2_DATAFORMATSTPC_ZeroSuppressionLinkBased_H
#define ALICEO2_DATAFORMATSTPC_ZeroSuppressionLinkBased_H

#include <bitset>

namespace o2
{
namespace tpc
{
namespace zerosupp_link_based
{

static constexpr uint32_t DataWordSizeBits = 128;                   ///< size of header word and data words in bits
static constexpr uint32_t DataWordSizeBytes = DataWordSizeBits / 8; ///< size of header word and data words in bytes (16)

/// header definition of the zero suppressed link based data format
///
/// One 128 bit word: an 80 bit channel bit mask, the 12 bit bunch crossing
/// counter and the number of following 128 bit payload words.
/// NOTE(review): the layout relies on the compiler packing the bit-fields
/// LSB-first into the 64 bit words; mixing uint64_t and uint32_t bit-field
/// members is implementation-defined — confirm on all target compilers.
struct Header {
  union {
    uint64_t word0 = 0;         ///< lower 64 bits
    struct {                    ///
      uint64_t bitMaskLow : 64; ///< lower bits of the 80 bit bitmask
    };                          ///
  };                            ///
                                ///
  union {                       ///
    uint64_t word1 = 0;             ///< upper 64 bits: high bitmask bits, bunch crossing, payload size
    struct {                        ///
      uint64_t bitMaskHigh : 16;    ///< higher bits of the 80 bit bitmask
      uint32_t bunchCrossing : 12;  ///< bunch crossing number
      uint32_t numWordsPayload : 8; ///< number of 128bit words with 12bit ADC values
      uint64_t zero : 28;           ///< not used
    };
  };

  /// assemble the full 80 bit channel mask from the two header words
  /// (bit i set presumably means channel i contributes an ADC value — confirm with decoder)
  std::bitset<80> getChannelBits()
  {
    return std::bitset<80>((std::bitset<80>(bitMaskHigh) << 64) | std::bitset<80>(bitMaskLow));
  }
};

/// NOTE(review): incomplete comment in original ("empty header for") — presumably meant "empty header for raw data without zero suppression"; confirm intent

/// ADC data container
///
/// In case of zero suppressed data, the ADC values are stored with 12 bit
/// (10 bit integer + 2 bit fractional) precision.
/// In case of decoded raw data, the pure 10 bit ADC values are stored.
///
/// The data is packed in 128 bit words, or 2x64 bit. Each 64 bit word has 4 bit
/// padding.
/// So it is either 2 x ((5 x 12 bit) + 4 bit padding), or
/// 2 x ((6 x 10 bit) + 4 bit padding)
///
/// \tparam DataBitSizeT number of bits per stored ADC value
/// \tparam SignificantBitsT number of those bits used as fractional (fixed point) precision
template <uint32_t DataBitSizeT = 12, uint32_t SignificantBitsT = 2>
struct Data {
  static constexpr uint32_t ChannelsPerHalfWord = DataWordSizeBits / DataBitSizeT / 2; ///< number of ADC values in one 64 bit half word
  static constexpr uint32_t DataBitSize = DataBitSizeT;                                ///< number of bits of the data representation
  static constexpr uint32_t SignificantBits = SignificantBitsT;                        ///< number of bits used for floating point precision
  static constexpr uint64_t BitMask = ((uint64_t(1) << DataBitSize) - 1);              ///< mask covering one ADC value
  static constexpr float FloatConversion = 1.f / float(1 << SignificantBits);          ///< conversion factor from integer representation to float

  uint64_t adcValues[2]{}; ///< 128bit ADC values (max. 10x12bit)

  /// set ADC 'value' at position 'pos' (0-9); only the lowest DataBitSize bits of 'value' are stored
  void setADCValue(uint32_t pos, uint64_t value)
  {
    const uint32_t word = pos / ChannelsPerHalfWord;
    const uint32_t posInWord = pos % ChannelsPerHalfWord;

    const uint64_t set = (value & BitMask) << (posInWord * DataBitSize);
    const uint64_t mask = ~(BitMask << (posInWord * DataBitSize)); // clear only the target bit range

    adcValues[word] &= mask;
    adcValues[word] |= set;
  }

  /// set ADC value from float, rounded to the nearest representable fixed point value
  void setADCValueFloat(uint32_t pos, float value)
  {
    setADCValue(pos, uint64_t((value + 0.5f * FloatConversion) / FloatConversion));
  }

  /// get ADC value of channel at position 'pos' (0-9)
  uint32_t getADCValue(uint32_t pos) const
  {
    const uint32_t word = pos / ChannelsPerHalfWord;
    const uint32_t posInWord = pos % ChannelsPerHalfWord;

    return (adcValues[word] >> (posInWord * DataBitSize)) & BitMask;
  }

  /// get ADC value at position 'pos' converted back to float
  float getADCValueFloat(uint32_t pos) const
  {
    return float(getADCValue(pos)) * FloatConversion;
  }

  /// reset all ADC values
  void reset()
  {
    adcValues[0] = 0;
    adcValues[1] = 0;
  }
};

/// data container layout, templated on whether a Header precedes the payload
template <uint32_t DataBitSizeT, uint32_t SignificantBitsT, bool HasHeaderT>
struct ContainerT;

/// container layout with header: one 128 bit header word followed by the payload words
template <uint32_t DataBitSizeT, uint32_t SignificantBitsT>
struct ContainerT<DataBitSizeT, SignificantBitsT, true> {
  Header header;                                ///< header data
  Data<DataBitSizeT, SignificantBitsT> data[0]; ///< 128 bit words with 12bit ADC values
                                                ///  NOTE(review): zero-length arrays are a GNU extension, not standard C++

  /// total number of 128 bit words of this container: payload words + 1 for the header word
  uint32_t dataWords() { return header.numWordsPayload + 1; }
};

/// container layout without header, used for decoded (not zero suppressed) data
template <uint32_t DataBitSizeT, uint32_t SignificantBitsT>
struct ContainerT<DataBitSizeT, SignificantBitsT, false> {
  Data<DataBitSizeT, SignificantBitsT> data[0]; ///< 128 bit words with 12bit ADC values
                                                ///  NOTE(review): zero-length arrays are a GNU extension, not standard C++

  /// total number of 128 bit words of this container
  /// assumes the decoded format (10 bit values, all 80 channels):
  /// 12 values per 128 bit word -> ceil(80 / 12) = 7 words — TODO confirm for other instantiations
  uint32_t dataWords() { return 7; }
};

/// Container for decoded data, either zero suppressed or pure raw data
///
/// In case of pure raw data, no header is needed, since all 80 channels will be filled
///
/// \tparam DataBitSizeT number of bits per stored ADC value
/// \tparam SignificantBitsT number of fixed point (fractional) bits per value
/// \tparam HasHeaderT whether a 128 bit header word precedes the payload
template <uint32_t DataBitSizeT = 12, uint32_t SignificantBitsT = 2, bool HasHeaderT = true>
struct Container {
  static constexpr uint32_t ChannelsPerWord = DataWordSizeBits / DataBitSizeT; ///< number of ADC values in one 128b word

  ContainerT<DataBitSizeT, SignificantBitsT, HasHeaderT> cont; ///< Templated data container

  /// return ADC value at position 'word' in the data stream
  uint32_t getADCValue(uint32_t word) { return cont.data[word / ChannelsPerWord].getADCValue(word % ChannelsPerWord); }

  /// return ADC value at position 'word' in the data stream converted to float
  float getADCValueFloat(uint32_t word) { return cont.data[word / ChannelsPerWord].getADCValueFloat(word % ChannelsPerWord); }

  /// set ADC value at position 'word' in the data stream
  void setADCValue(uint32_t word, uint64_t value) { cont.data[word / ChannelsPerWord].setADCValue(word % ChannelsPerWord, value); }

  /// set ADC value at position 'word' in the data stream from float
  void setADCValueFloat(uint32_t word, float value) { cont.data[word / ChannelsPerWord].setADCValueFloat(word % ChannelsPerWord, value); }

  /// reset all ADC values
  void reset()
  {
    for (uint32_t i = 0; i < cont.dataWords(); ++i) {
      cont.data[i].reset();
    }
  }

  /// get position of next container. Validity check to be done outside!
  Container* next()
  {
    // advance by the container size in BYTES; the original 'this + n' scaled
    // by sizeof(Container) and therefore overshot the next container
    return reinterpret_cast<Container*>(reinterpret_cast<char*>(this) + cont.dataWords() * DataWordSizeBytes);
  }
};

using ContainerZS = Container<>;
using ContainerDecoded = Container<10, 0, false>;

} // namespace zerosupp_link_based
} // namespace tpc
} // namespace o2

#endif
1 change: 1 addition & 0 deletions DataFormats/Detectors/TPC/src/DataFormatsTPCLinkDef.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,6 @@
#pragma link C++ class o2::tpc::CompressedClustersCounters + ;
#pragma link C++ class o2::tpc::CompressedClustersPtrs_helper < o2::tpc::CompressedClustersCounters> + ;
#pragma link C++ class o2::tpc::CompressedClusters + ;
#pragma link C++ enum o2::tpc::StatisticsType;

#endif
24 changes: 24 additions & 0 deletions DataFormats/Detectors/TPC/src/LaserTrack.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,16 @@
/// \brief Laser track parameters
/// \author Jens Wiechula, jens.wiechula@ikf.uni-frankfurt.de

#include <memory>
#include <fstream>
#include <iostream>
#include <sstream>
#include <vector>
#include <FairMQLogger.h>

#include "TFile.h"
#include "TTree.h"

#include "DataFormatsTPC/LaserTrack.h"

using namespace o2::tpc;
Expand Down Expand Up @@ -45,3 +50,22 @@ void LaserTrackContainer::loadTracksFromFile()
mLaserTracks[id] = LaserTrack(id, x, alpha, {p0, p1, p2, p3, p4});
}
}

void LaserTrackContainer::dumpToTree(const std::string_view fileName)
{
LaserTrackContainer c;
c.loadTracksFromFile();
const auto& tracks = c.getLaserTracks();
std::vector<LaserTrack> vtracks;

for (const auto& track : tracks) {
vtracks.emplace_back(track);
}

std::unique_ptr<TFile> fout(TFile::Open(fileName.data(), "recreate"));
TTree t("laserTracks", "Laser Tracks");
t.Branch("tracks", &vtracks);
t.Fill();
fout->Write();
fout->Close();
}
Loading