#include "PHASIC++/Process/Single_Process.H" #include "PHASIC++/Process/ME_Generator_Base.H" #include "PHASIC++/Process/MCatNLO_Process.H" #include "PHASIC++/Main/Process_Integrator.H" #include "PHASIC++/Scales/KFactor_Setter_Base.H" #include "PHASIC++/Selectors/Combined_Selector.H" #include "PHASIC++/Main/Phase_Space_Handler.H" #include "PHASIC++/Channels/BBar_Multi_Channel.H" #include "PHASIC++/Channels/CS_Dipole.H" #include "PDF/Main/ISR_Handler.H" #include "PDF/Main/Shower_Base.H" #include "PDF/Main/Cluster_Definitions_Base.H" #include "BEAM/Main/Beam_Spectra_Handler.H" #include "ATOOLS/Phys/Cluster_Amplitude.H" #include "ATOOLS/Phys/Weight_Info.H" #include "ATOOLS/Phys/Hard_Process_Variation_Generator.H" #include "ATOOLS/Math/Random.H" #include "ATOOLS/Org/Run_Parameter.H" #include "ATOOLS/Org/MyStrStream.H" #include "ATOOLS/Org/Scoped_Settings.H" #include "METOOLS/Explicit/NLO_Counter_Terms.H" #include "MODEL/Main/Coupling_Data.H" #include "MODEL/Main/Running_AlphaS.H" #include #include using namespace PHASIC; using namespace MODEL; using namespace ATOOLS; Single_Process::Single_Process(): m_lastxs(0.0), m_lastbxs(0.0), m_dsweight(1.0), m_lastflux(0.0), m_zero(false), m_dads(true), m_pdfcts(true), m_nfconvscheme(0) { Settings& s = Settings::GetMainSettings(); m_pdfcts = s["MEPSNLO_PDFCT"].SetDefault(true).Get(); m_dads = s["MCNLO_DADS"].SetDefault(true).Get(); std::string ncs{ s["NLO_NF_CONVERSION_TERMS"] .SetDefault("None") .UseNoneReplacements() .Get() }; if (ncs=="None" || ncs=="0") m_nfconvscheme=0; else if (ncs=="On" || ncs=="1") m_nfconvscheme=1; else if (ncs=="Only" || ncs=="2") m_nfconvscheme=2; else THROW(fatal_error,"Unknown NLO_NF_CONVERSION_TERMS scheme."); static bool printed(false); if (!printed && m_nfconvscheme>0) { if (m_nfconvscheme==1) msg_Info()<<"NLO n_f scheme conversion terms added to I-operator."<> asscontribvars = s["ASSOCIATED_CONTRIBUTIONS_VARIATIONS"].GetMatrix(); for (const auto& asscontribvarparams : asscontribvars) { m_asscontrib.push_back(asscontrib::none); for (const auto& asscontribvarparam : asscontribvarparams) { m_asscontrib.back() |= asscontribvarparam; } } } Single_Process::~Single_Process() { for (Coupling_Map::const_iterator cit(m_cpls.begin());cit!=m_cpls.end();++cit) delete cit->second; for (auto gen : m_hard_process_variation_generators) delete gen; } size_t Single_Process::Size() const { return 1; } Process_Base *Single_Process::operator[](const size_t &i) { if (i==0) return this; return NULL; } Weight_Info *Single_Process::OneEvent(const int wmode, ATOOLS::Variations_Mode varmode, const int mode) { p_selected = this; auto psh = p_int->PSHandler(); if (p_int->ISR()) { if (m_nin == 2) { if (m_flavs[0].Mass() != p_int->ISR()->Flav(0).Mass() || m_flavs[1].Mass() != p_int->ISR()->Flav(1).Mass()) { p_int->ISR()->SetPartonMasses(m_flavs); } } } psh->InitCuts(); return p_int->PSHandler()->OneEvent(this,varmode,mode); } double Single_Process::KFactor(const int mode) const { if (p_kfactor && p_kfactor->On()) return p_kfactor->KFactor(mode); return 1.0; } double Single_Process::NfSchemeConversionTerms() const { if (m_nfconvscheme==0) return 0.; DEBUG_FUNC("scheme="<Nf(1.e20)) THROW(fatal_error,"Inconsistent Nf between PDFs and \\alpha_s."); if (nfb==-1) nfa=MODEL::as->Nf(1.e20); const size_t nf(nfb); const size_t nfl(Flavour(kf_quark).Size()/2); if (nfl==nf) return 0.; // debugging output msg_Debugging()<<"a="<Default()*qcdcpl->Factor()); MODEL::Coupling_Data *qedcpl(m_cpls.Get("Alpha_QED")); double aqed(qedcpl->Default()*qedcpl->Factor()); double 
  double res(0.);
  double p(m_maxcpl[0]-m_pinfo.m_fi.m_nlocpl[0]);
  double facqcd(as/3./M_PI*MODEL::as->TR()), facqed(aqed/3/M_PI);
  msg_Debugging()<<"p="<<p<<std::endl;
  // (the accumulation of the n_f scheme-conversion terms into res,
  //  proportional to facqcd and facqed, is not reproduced here)
  msg_Debugging()<<m_lastxs<<" -> "
                 <<(m_nfconvscheme==2?0.:m_lastxs)+m_lastbxs*res<<std::endl;
  return (m_nfconvscheme==2?-m_lastxs:0.)+m_lastbxs*res;
}

double Single_Process::CollinearCounterTerms
(const int i,const Flavour &fl,const Vec4D &p,
 const double &z,const double &t1,const double &t2,
 const double &muf2fac,const double &mur2fac,
 MODEL::Running_AlphaS *as) const
{
  // PDF counter term for incoming leg i between the scales t1 and t2
  if (!m_pdfcts) return 0.;
  if (!(p_int->ISR() && p_int->ISR()->On()&(1<<i))) return 0.;
  // threshold below which the xPDF is considered zero (value assumed)
  static double th(1.0e-12);
  double lmuf2(p_scale->Scale(stp::fac)*muf2fac);
  double lmur2(p_scale->Scale(stp::ren)*mur2fac);
  msg_Debugging()<<"\\mu_F = "<<sqrt(lmuf2)
                 <<", \\mu_R = "<<sqrt(lmur2)<<"\n";
  double asvalue(0.);
  if (as) asvalue=(*as)(lmur2);
  else {
    MODEL::Coupling_Data *cpl(m_cpls.Get("Alpha_QCD"));
    asvalue=cpl->Default() * cpl->Factor();
  }
  // determine counter-term
  double ct(0.0), lt(log(t1/t2)), x(p_int->ISR()->CalcX(p));
  msg_Debugging()<<fl<<": x="<<x<<", z="<<z<<", log(t1/t2)="<<lt<<"\n";
  double fb=p_int->ISR()->PDFWeight(1<<(i+1),p,p,lmuf2,lmuf2,fl,fl,0);
  if (IsZero(fb,th)) {
    msg_Tracking()<<METHOD<<"(): Zero xPDF for "<<fl
                  <<" at x="<<x<<". Skip."<<std::endl;
    return 0.;
  }
  Flavour jet(kf_jet);
  for (size_t j(0);j<jet.Size();++j) {
    double fa=p_int->ISR()->PDFWeight
      (1<<(i+1),p/z,p/z,lmuf2,lmuf2,jet[j],jet[j],0);
    double fc=p_int->ISR()->PDFWeight
      (1<<(i+1),p,p,lmuf2,lmuf2,jet[j],jet[j],0);
    msg_Debugging()<<"  P_{"<<jet[j]<<","<<fl<<"}("<<z<<") -> "
                   <<fa<<" / "<<fc<<"\n";
    // (the explicit splitting-function contribution to ct, built from
    //  asvalue, fa, fc, z and lt, is not reproduced here)
  }
  return ct;
}

Cluster_Sequence_Info Single_Process::ClusterSequenceInfo
(const ATOOLS::ClusterAmplitude_Vector &ampls,
 const double &Q2,
 const double &muf2fac,const double &mur2fac,const double &showermuf2fac,
 MODEL::Running_AlphaS *as,
 const ATOOLS::Cluster_Sequence_Info * const nominalcsi)
{
  if (m_nin > 2) {
    THROW(not_implemented, "More than two incoming particles.");
  }
  Cluster_Sequence_Info csi;
  AddISR(csi, ampls, Q2, muf2fac, mur2fac, showermuf2fac, as, nominalcsi);
  AddBeam(csi, Q2);
  return csi;
}

void Single_Process::AddISR
(ATOOLS::Cluster_Sequence_Info &csi,
 const ATOOLS::ClusterAmplitude_Vector &ampls,
 const double &Q2,
 const double &muf2fac,const double &mur2fac,const double &showermuf2fac,
 MODEL::Running_AlphaS *as,
 const ATOOLS::Cluster_Sequence_Info * const nominalcsi)
{
  DEBUG_FUNC(Name());
  if (p_int->ISR()) {
    // add external PDF weight (before clustering)
    double pdfext(p_int->ISR()->PDFWeight(0,
                                          p_int->Momenta()[0],
                                          p_int->Momenta()[1],
                                          Q2, Q2, m_flavs[0], m_flavs[1]));
    msg_Debugging()<<"PDF(fla="<<m_flavs[0]<<", flb="<<m_flavs[1]
                   <<", muF2="<<Q2<<") -> "<<pdfext<<"\n";
    csi.AddWeight(pdfext);
    csi.AddPDFRatio(pdfext, 1.);
    double currentQ2(Q2), currentscalefactor(1.0);
    double pdfnum(pdfext), pdfden(1.0);
    // iterate over the cluster sequence
    for (Cluster_Amplitude *ampl(ampls.size() ? ampls.front() : NULL);
         ampl; ampl = ampl->Next()) {
      // skip decays, equal scales, unordered configs,
      // and quarks below threshold
      // skip decays (they are not even added to the splittings)
      msg_IODebugging()<<*ampl<<"\n";
      if (ampl->NLO()&1) {
        msg_Debugging()<<"Skip. NLO clustering "
                       << ID(ampl->Next()->Splitter()->Id())<<"\n";
        continue;
      }
      if (ampl->Next() && ampl->Next()->Splitter()->Stat() == 3) {
        msg_Debugging()<<"Skip. Decay "
                       << ID(ampl->Next()->Splitter()->Id())<<"\n";
        continue;
      }
      // add splitting
      Flavour f1(ampl->Leg(0)->Flav().Bar());
      Flavour f2(ampl->Leg(1)->Flav().Bar());
      if (MapProc() && LookUp() && !m_pinfo.Has(nlo_type::real)) {
        f1=ReMap(f1, ampl->Leg(0)->Id());
        f2=ReMap(f2, ampl->Leg(1)->Id());
      }
      // ampl->KT2() has actual splitting scale
      // ampl->MuF() is fac-scale as given in scale setter
      csi.AddSplitting(ampl->KT2(),
                       p_int->ISR()->CalcX(-ampl->Leg(0)->Mom()),
                       p_int->ISR()->CalcX(-ampl->Leg(1)->Mom()),
                       f1, f2);
      // skip equal scales
      if (IsEqual(currentQ2 / currentscalefactor,
                  ampl->Next() ? showermuf2fac * ampl->KT2() : Q2)) {
        msg_Debugging()<<"Skip. Scales equal: t_i="
                       <<sqrt(currentQ2 / currentscalefactor)<<" -> "
                       <<sqrt(showermuf2fac * ampl->KT2())<<"\n";
        currentQ2 = sqrt(std::numeric_limits<double>::max());
        csi.AddPDFRatio(1.,1.);
        continue;
      }
      // skip when a scale is below a (quark) mass threshold
      if (currentQ2 / currentscalefactor < sqr(2.0 * f1.Mass(true)) ||
          currentQ2 / currentscalefactor < sqr(2.0 * f2.Mass(true))) {
        msg_Debugging()<<"Skip. Quarks below threshold: t="
                       <<sqrt(currentQ2 / currentscalefactor)<<"\n";
        continue;
      }
      // PDF ratio denominators at the current (outer) scale
      double wd1(p_int->ISR()->PDFWeight(2, -ampl->Leg(0)->Mom(),
                                         -ampl->Leg(1)->Mom(),
                                         currentQ2, currentQ2, f1, f2, 0));
      double wd2(p_int->ISR()->PDFWeight(4, -ampl->Leg(0)->Mom(),
                                         -ampl->Leg(1)->Mom(),
                                         currentQ2, currentQ2, f1, f2, 0));
      // new scale (note: for the core scale we use Q2 instead of ampl->MuF2
      // because we might be reweighting and Q2 could have been multiplied
      // by a scaling factor, whereas ampl->MuF2 would not reflect this)
      double lastQ2 = currentQ2;
      if (ampl->Next() == NULL) {
        currentQ2 = Q2;
        currentscalefactor = 1.0;
      } else {
        currentQ2 = showermuf2fac * ampl->KT2();
        currentscalefactor = showermuf2fac;
      }
      // skip when a scale is below a (quark) mass threshold, new scale
      if (currentQ2 < sqr(2.0 * f1.Mass(true)) ||
          currentQ2 < sqr(2.0 * f2.Mass(true))) {
        msg_Debugging()<<"Skip. Quarks below threshold: t="
                       <<sqrt(currentQ2)<<"\n";
        continue;
      }
      // PDF ratio numerators at the new scale
      double wn1(p_int->ISR()->PDFWeight(2, -ampl->Leg(0)->Mom(),
                                         -ampl->Leg(1)->Mom(),
                                         currentQ2, currentQ2, f1, f2, 0));
      double wn2(p_int->ISR()->PDFWeight(4, -ampl->Leg(0)->Mom(),
                                         -ampl->Leg(1)->Mom(),
                                         currentQ2, currentQ2, f1, f2, 0));
      double x1=p_int->ISR()->CalcX(-ampl->Leg(0)->Mom());
      double x2=p_int->ISR()->CalcX(-ampl->Leg(1)->Mom());
      // skip PDF ratio if the high-x sanity condition is not fulfilled
      auto validratio1 = (!IsZero(wn1) && !IsZero(wd1));
      if (validratio1 && x1 < 1.0)
        validratio1 = !(dabs(wd1)<1.0e-4*log(1.0 - x1)/log(1.0 - 1.0e-2));
      if (validratio1) {
        csi.AddWeight(wn1 / wd1);
      } else {
        msg_Debugging() << "invalid pdf ratio in beam 0," << std::endl;
        msg_Debugging() << "skip weight." << std::endl;
      }
      auto validratio2 = (!IsZero(wn2) && !IsZero(wd2));
      if (validratio2 && x2 < 1.0)
        validratio2 = !(dabs(wd2)<1.0e-4*log(1.0 - x2)/log(1.0 - 1.0e-2));
      if (validratio2) {
        csi.AddWeight(wn2 / wd2);
      } else {
        msg_Debugging() << "invalid pdf ratio in beam 1," << std::endl;
        msg_Debugging() << "skip weight." << std::endl;
      }
      // book-keep PDF ratios excl.
      // a) first one correcting outer PDF from muF to t
      // b) last numerator taken at muF (this one is to be varied)
      // use the following identity with i=0 -> core and i=N -> ext
      // wn-ext * [\prod_{i=0}^{N-1} wn_i/wd_i]
      //   = [wn-ext * \prod_{i=1}^{N-1} wn_i/wd_i * 1/wd_0] * wn-core
      //   = [\prod_{i=1}^N wn_i/wd_{i-1}] * wn-core
      // (a small numeric illustration of this identity is sketched at the
      //  end of this file)
      pdfnum = wn1 * wn2;
      pdfden = wd1 * wd2;
      if (ampl->Next() == NULL) {
        csi.AddPDFRatio(1., pdfden);
      } else {
        csi.AddPDFRatio(pdfnum, pdfden);
      }
      msg_Debugging()<<"* [ "
                     <<"PDF(fla="<<f1<<", x="<<x1<<", t="<<sqrt(lastQ2)
                     <<" -> "<<sqrt(currentQ2)<<") * "
                     <<"PDF(flb="<<f2<<", x="<<x2<<", t="<<sqrt(lastQ2)
                     <<" -> "<<sqrt(currentQ2)<<") ] -> "
                     <<wn1/wd1*wn2/wd2<<"\n";
    }
  }
}

void Single_Process::AddBeam(ATOOLS::Cluster_Sequence_Info &csi,
                             const double &Q2)
{
  if (p_int->Beam() && p_int->Beam()->On()) {
    msg_Debugging()<<"add beam weight: type = ("
                   <<p_int->Beam()->GetBeam(0)->Type()<<", "
                   <<p_int->Beam()->GetBeam(1)->Type()<<")"
                   <<", x = ("<<p_int->Beam()->GetBeam(0)->X()<<", "
                   <<p_int->Beam()->GetBeam(1)->X()<<")"
                   <<", moms = ("<<p_int->Beam()->GetBeam(0)->OutMomentum(0)
                   <<", "<<p_int->Beam()->GetBeam(1)->OutMomentum(0)<<")"
                   <<" -> "<<p_int->Beam()->Weight()<<"\n";
    csi.AddWeight(p_int->Beam()->Weight());
  }
}

Weights_Map Single_Process::Differential(const Vec4D_Vector& p,
                                         Variations_Mode varmode)
{
  DEBUG_FUNC(Name()<<", RS:"<<m_pinfo.Has(nlo_type::rsub));
  ResetResultsForDifferential(varmode);
  InitMEWeightInfo();
  CalculateFlux(p);
  UpdateIntegratorMomenta(p);
  if (IsMapped()) {
    p_mapproc->SetCaller(this);
  }
  Scale_Setter_Base* scales {ScaleSetter(1)};
  Partonic(p, varmode);
  double nominal {0.0};
  double facscale {0.0};
  if (GetSubevtList() == nullptr) {
    if (varmode == Variations_Mode::all) {
      assert(Selector()->CombinedResults().size() == 1);
      m_last *= Selector()->CombinedResults()[0];
    }
    // calculate ISR weight
    facscale = scales->Scale(stp::fac);
    m_csi = ClusterSequenceInfo(scales->Amplitudes(), facscale);
    m_csi.AddFlux(m_lastflux);
    // update results
    nominal = m_lastxs + NfSchemeConversionTerms() - m_lastbxs * m_csi.m_ct;
    m_lastb = m_lastbxs;
    if (m_use_biweight) {
      double prefac {m_csi.m_pdfwgt * m_csi.m_flux};
      nominal *= prefac;
      m_lastb *= prefac;
    }
    if (p_mc != nullptr && m_dsweight && m_pinfo.Has(nlo_type::vsub)) {
      // calculate DADS term for MC@NLO: one PS point, many dipoles
      m_mewgtinfo.m_type |= mewgttype::DADS;
      if (m_dads) {
        // ask BBar emission generator for dipole parameters
        const Dipole_Params dps {p_mc->Active(this)};
        if (dps.p_dip != nullptr) {
          // calculate incoming parton longitudinal momentum fractions
          std::array<double, 2> x;
          for (size_t i {0}; i < 2; ++i) {
            x[i] = Min(p_int->ISR()->CalcX(dps.m_p[i]), 1.0);
          }
          for (Process_Base* proc : dps.m_procs) {
            // NOTE: the following adjustments need to be reset before returning
            const size_t mcmode {proc->SetMCMode(m_mcmode)};
            const bool lookup {proc->LookUp()};
            proc->SetLookUp(false);
            auto wgtmap = proc->Differential(dps.m_p, varmode);
            wgtmap *= dps.m_weight * m_dsweight;
            m_dadswgtmap += wgtmap;
            double dadsmewgt {proc->GetMEwgtinfo()->m_B * dps.m_weight *
                              m_dsweight};
            DADS_Info dads {-dadsmewgt, x[0], x[1],
                            (long unsigned int)(proc->Flavours()[0]),
                            (long unsigned int)(proc->Flavours()[1])};
            m_mewgtinfo.m_dadsinfos.push_back(dads);
            // NOTE: here we reset the adjustments we have done above
            proc->SetLookUp(lookup);
            proc->SetMCMode(mcmode);
          }
        }
      }
    }
  } else {
    const auto triggers = Selector()->CombinedResults();
    for (int i {0}; i < GetSubevtList()->size(); ++i) {
      auto sub = (*GetSubevtList())[i];
      // append RDA info to m_mewgtinfo
      if (m_mewgtinfo.m_type & mewgttype::H) {
        RDA_Info rda {sub->m_mewgt, sub->m_mu2[stp::ren], sub->m_mu2[stp::fac],
                      sub->m_mu2[stp::fac], sub->m_i, sub->m_j, sub->m_k};
        m_mewgtinfo.m_rdainfos.push_back(rda);
        msg_Debugging()<<"RDA["<<i<<"]: wgt="<<sub->m_mewgt<<"\n";
      }
      // accumulate the subevent result (the remaining per-subevent
      // assembly, including the trigger weights, is not reproduced here)
      nominal += sub->m_result;
    }
  }
  m_last *= nominal;
  UpdateMEWeightInfo(scales);
  // perform the on-the-fly reweighting for QCD variations
  if (varmode != Variations_Mode::nominal_only) {
    if (GetSubevtList() == nullptr) {
      ReweightBVI(scales->Amplitudes());
    } else {
      ReweightRS(scales->Amplitudes());
    }
  }
  if (m_dads) {
    m_last -= m_dadswgtmap;
  }
  // calculate associated contributions variations (not for DADS events)
  if (varmode != Variations_Mode::nominal_only &&
      (GetSubevtList() != nullptr || !m_pinfo.Has(nlo_type::rsub))) {
    CalculateAssociatedContributionVariations();
  }
  if (varmode != Variations_Mode::nominal_only) {
(auto& gen : m_hard_process_variation_generators) { gen->GenerateAndFillWeightsMap(m_last); } } // propagate (potentially) re-clustered momenta if (GetSubevtList() == nullptr) { UpdateIntegratorMomenta(scales->Amplitudes()); } else { for (NLO_subevt* sub : *GetSubevtList()) { UpdateSubeventMomenta(*sub); } } // reset PDF scales to hard factorisation scale (since the PDFInfo object // will be populated with it) p_int->ISR()->SetMuF2(facscale, 0); p_int->ISR()->SetMuF2(facscale, 1); return m_last; } void Single_Process::ResetResultsForDifferential(Variations_Mode varmode) { m_lastflux = 0.0; m_mewgtinfo.Reset(); m_last.Clear(); m_lastb.Clear(); if (varmode != Variations_Mode::nominal_only) { m_last["ME"] = Weights {Variations_Type::qcd}; m_lastb["ME"] = Weights {Variations_Type::qcd}; } m_last = 1.0; m_lastb = 0.0; if (m_dads) { m_dadswgtmap.Clear(); if (varmode != Variations_Mode::nominal_only) { m_dadswgtmap["ME"] = Weights {Variations_Type::qcd}; } m_dadswgtmap = 0.0; } } void Single_Process::UpdateIntegratorMomenta(const Vec4D_Vector& p) { p_int->SetMomenta(p); if (IsMapped()) p_mapproc->Integrator()->SetMomenta(p); } void Single_Process::UpdateIntegratorMomenta(ClusterAmplitude_Vector& ampls) { if (ampls.size()) { Cluster_Amplitude* ampl {ampls.front()->Last()}; if (ampl->NLO() & 256) { Vec4D_Vector p(m_nin + m_nout); for (size_t i(0); i < ampl->Legs().size(); ++i) p[i] = i < m_nin ? -ampl->Leg(i)->Mom() : ampl->Leg(i)->Mom(); p_int->SetMomenta(p); } } } void Single_Process::UpdateSubeventMomenta(NLO_subevt& sub) { if (sub.p_real->p_ampl == nullptr) { return; } Cluster_Amplitude* ampl {sub.p_real->p_ampl->Last()}; if (ampl->NLO() & 256) { for (size_t i(0); i < ampl->Legs().size(); ++i) { *((Vec4D*)&sub.p_mom[i]) = ampl->Leg(i)->Mom(); } for (size_t i(ampl->Legs().size()); i < sub.m_n; ++i) { *((Vec4D*)&sub.p_mom[i]) = Vec4D(); } } } void Single_Process::CalculateFlux(const Vec4D_Vector& p) { if (m_nin == 1) m_lastflux = p_int->ISR()->Flux(p[0]); else m_lastflux = p_int->ISR()->Flux(p[0], p[1]); m_lastflux /= m_issymfac; } void Single_Process::ReweightBVI(ClusterAmplitude_Vector& ampls) { BornLikeReweightingInfo info {m_mewgtinfo, ampls, m_last.Nominal()}; // NOTE: we iterate over m_last's variations, but we also update m_lastb // inside the loop, to avoid code duplication; also note that m_lastb should // not be all-zero if m_lastbxs is zero Reweight(m_last["ME"], [this, &ls, &info]( double varweight, size_t varindex, QCD_Variation_Params& varparams) -> double { if (varweight == 0.0) { m_lastb["ME"].Variation(varindex) = 0.0; return 0.0; } double K {1.0}; if (!m_mewgtinfo.m_bkw.empty()) { K = m_mewgtinfo.m_bkw[varindex]; } if (m_mewgtinfo.m_type == mewgttype::none || m_mewgtinfo.m_type == mewgttype::METS) { const auto res = ReweightBornLike(varparams, info); m_lastb["ME"].Variation(varindex) = (m_lastbxs != 0.0) ? 
          res / m_lastb.BaseWeight() : 0.0;
      return K * res / m_last.BaseWeight();
    } else {
      const double muR2new {MuR2(varparams, info)};
      const Cluster_Sequence_Info csi {ClusterSequenceInfo(
          varparams, info, muR2new / info.m_muR2, &m_mewgtinfo.m_clusseqinfo)};
      double res {0.0};
      double resb {0.0};
      if (csi.m_pdfwgt != 0.0) {
        // calculate AlphaS factors (for Born and non-Born contributions)
        const double alphasratio {
            AlphaSRatio(info.m_muR2, muR2new, varparams.p_alphas)};
        const double alphasfac {pow(alphasratio, info.m_orderqcd)};
        double bornalphasfac {1.0};
        if (alphasfac != 1.0) {
          // for the Born contribution within BVIKP, we need to evaluate at
          // the lower order
          const bool needslowerorderqcd {m_mewgtinfo.m_type & mewgttype::VI ||
                                         m_mewgtinfo.m_type & mewgttype::KP};
          bornalphasfac = needslowerorderqcd ? alphasfac / alphasratio
                                             : alphasfac;
        }
        // B term
        const double Bnew {m_mewgtinfo.m_B * bornalphasfac};
        // VI term
        const double logR {log(muR2new / info.m_muR2)};
        const double VInew {(m_mewgtinfo.m_VI + m_mewgtinfo.m_wren[0] * logR +
                             m_mewgtinfo.m_wren[1] * 0.5 * ATOOLS::sqr(logR)) *
                            alphasfac};
        // KP terms
        const double KPnew {KPTerms(&varparams) * alphasfac};
        // Calculate K1
        double K1 {1.0};
        if (m_mewgtinfo.m_bkw.size() > s_variations->Size()) {
          K1 = m_mewgtinfo.m_bkw[s_variations->Size() + varindex];
        }
        // Calculate final reweighted BVIKP result
        resb = Bnew * csi.m_pdfwgt;
        res = (Bnew * K * (1.0 - csi.m_ct) + (VInew + KPnew) * K1) *
              csi.m_pdfwgt;
      }
      m_lastb["ME"].Variation(varindex) = (m_lastbxs != 0.0) ?
          resb / m_lastb.BaseWeight() : 0.0;
      return res / m_last.BaseWeight();
    }
  });
}

void Single_Process::ReweightRS(ClusterAmplitude_Vector& ampls)
{
  // first reweight all subevents individually
  BornLikeReweightingInfo info {m_mewgtinfo, ampls, m_last.Nominal()};
  auto last_subevt_idx = GetSubevtList()->size() - 1;
  for (auto& sub : *GetSubevtList()) {
    sub->m_results["ME"] = Weights {Variations_Type::qcd};
  }
  s_variations->ForEach(
      [this, &info, &last_subevt_idx](size_t varindex,
                                      QCD_Variation_Params& varparams) -> void {
        double K {1.0};
        if (!m_mewgtinfo.m_bkw.empty()) {
          K = m_mewgtinfo.m_bkw[varindex];
        }
        for (int i {0}; i <= last_subevt_idx; ++i) {
          auto sub = (*GetSubevtList())[i];
          info.m_wgt = sub->m_mewgt;
          info.m_muR2 = sub->m_mu2[stp::ren];
          info.m_muF2 = sub->m_mu2[stp::fac];
          info.m_ampls = ClusterAmplitude_Vector(sub->p_real->p_ampl ?
              1 : 0, sub->p_real->p_ampl);
          info.m_fallbackresult = sub->m_result;
          auto contrib = K * ReweightBornLike(varparams, info);
          sub->m_results["ME"].Variation(varindex) =
              contrib / sub->m_results.BaseWeight();
        }
      });
  // finally, add the subevent weights
  m_last.Clear();
  m_last = 0.0;
  for (int i {0}; i <= last_subevt_idx; ++i) {
    auto sub = (*GetSubevtList())[i];
    m_last += sub->m_results;
  }
}

void Single_Process::CalculateAssociatedContributionVariations()
{
  // we need to at least set them to 1.0, if there is no genuine contribution,
  // since it's always expected by the output handlers, that all variation
  // weights are filled consistently across events
  for (const auto& asscontrib : m_asscontrib) {
    const std::string key = ToString(asscontrib);
    m_last["ASSOCIATED_CONTRIBUTIONS"][key] = 1.0;
    m_last["ASSOCIATED_CONTRIBUTIONS"]["MULTI" + key] = 1.0;
    m_last["ASSOCIATED_CONTRIBUTIONS"]["EXP" + key] = 1.0;
  }
  if (m_asscontrib.empty() || !(m_mewgtinfo.m_type & mewgttype::VI)) return;
  if (GetSubevtList() == nullptr) {
    // calculate BVIKP - DADS as the reference point for the additive correction
    const double BVIKP {m_mewgtinfo.m_B * (1 - m_csi.m_ct) +
                        m_mewgtinfo.m_VI + m_mewgtinfo.m_KP};
    const double DADS {m_dads ? m_dadswgtmap.Nominal("ME") / m_csi.m_pdfwgt
                              : 0.0};
    const double BVIKPDADS {BVIKP - DADS};
    if (IsBad(BVIKPDADS)) return;
    // calculate order in QCD
    auto orderqcd = m_mewgtinfo.m_oqcd;
    if (m_mewgtinfo.m_type & mewgttype::VI ||
        m_mewgtinfo.m_type & mewgttype::KP) {
      orderqcd--;
    }
    for (const auto& asscontrib : m_asscontrib) {
      // collect terms
      double Bassnew {0.0}, Deltaassnew {1.0}, Deltaassnewexp {1.0};
      for (size_t i(0); i < m_mewgtinfo.m_wass.size(); ++i) {
        // m_wass[0] is EW Sudakov-type correction
        // m_wass[1] is the subleading Born
        // m_wass[2] is the subsubleading Born, etc.
        if (m_mewgtinfo.m_wass[i] && asscontrib & (1 << i)) {
          const double relfac {m_mewgtinfo.m_wass[i] / m_mewgtinfo.m_B};
          if (1.0 + relfac > 10.0) {
            msg_Error() << "KFactor from EWVirt is large: " << relfac
                        << " -> ignore\n";
            Deltaassnew = 1.0;
            Deltaassnewexp = 1.0;
            Bassnew = 0.0;
            break;
          } else {
            if (i == 0) {
              Deltaassnew *= 1.0 + relfac;
              Deltaassnewexp *= exp(relfac);
            }
            Bassnew += m_mewgtinfo.m_wass[i];
          }
        }
        if ((orderqcd - i) == 0) break;
      }
      // store variations
      const std::string key = ToString(asscontrib);
      m_last["ASSOCIATED_CONTRIBUTIONS"][key] =
          (BVIKPDADS + Bassnew) / BVIKPDADS;
      m_last["ASSOCIATED_CONTRIBUTIONS"]["MULTI" + key] = Deltaassnew;
      m_last["ASSOCIATED_CONTRIBUTIONS"]["EXP" + key] = Deltaassnewexp;
    }
  }
}

void Single_Process::InitMEWeightInfo()
{
  m_mewgtinfo.m_oqcd = MaxOrder(0);
  m_mewgtinfo.m_oew = MaxOrder(1);
  m_mewgtinfo.m_fl1 = (int)(Flavours()[0]);
  m_mewgtinfo.m_fl2 = (int)(Flavours()[1]);
  m_mewgtinfo.m_x1 = p_int->ISR()->X1();
  m_mewgtinfo.m_x2 = p_int->ISR()->X2();
}

void Single_Process::UpdateMEWeightInfo(Scale_Setter_Base* scales)
{
  m_mewgtinfo *= m_lastflux;
  if (scales != nullptr) {
    m_mewgtinfo.m_muf2 = scales->Scale(stp::fac);
    m_mewgtinfo.m_mur2 = scales->Scale(stp::ren);
  }
  m_mewgtinfo.m_clusseqinfo = m_csi;
  msg_Debugging()<<m_mewgtinfo<<"\n";
}

Cluster_Sequence_Info Single_Process::ClusterSequenceInfo(
    const ATOOLS::QCD_Variation_Params& varparams,
    Single_Process::BornLikeReweightingInfo& info,
    const double& mur2fac,
    const ATOOLS::Cluster_Sequence_Info* const nominalcsi)
{
  const double Q2 {info.m_muF2};
  PDF::PDF_Base *nominalpdf1 = p_int->ISR()->PDF(0);
  PDF::PDF_Base *nominalpdf2 = p_int->ISR()->PDF(1);
  const double xf1 = p_int->ISR()->XF1();
  const double xf2 = p_int->ISR()->XF2();
  p_int->ISR()->SetPDF(varparams.p_pdf1, 0);
  p_int->ISR()->SetPDF(varparams.p_pdf2, 1);
  double muF2fac {1.0};
  if (varparams.m_showermuF2enabled) muF2fac = varparams.m_muF2fac;
  ATOOLS::Cluster_Sequence_Info csi(
      ClusterSequenceInfo(info.m_ampls, Q2, varparams.m_muF2fac, mur2fac,
                          muF2fac, varparams.p_alphas, nominalcsi));
  if (csi.m_pdfwgt == 0.0 &&
      m_mewgtinfo.m_clusseqinfo.m_pdfwgt != 0.0) {
    varparams.IncrementOrInitialiseWarningCounter(
        "Target PDF weight is zero, nominal is non-zero");
  } else if (csi.m_pdfwgt != 0.0 &&
             m_mewgtinfo.m_clusseqinfo.m_pdfwgt == 0.0) {
    varparams.IncrementOrInitialiseWarningCounter(
        "Target PDF weight is non-zero, nominal is zero");
  }
  // reset
  p_int->ISR()->SetPDF(nominalpdf1, 0);
  p_int->ISR()->SetPDF(nominalpdf2, 1);
  p_int->ISR()->SetMuF2(info.m_muF2, 0);
  p_int->ISR()->SetMuF2(info.m_muF2, 1);
  p_int->ISR()->SetXF1(xf1);
  p_int->ISR()->SetXF2(xf2);
  return csi;
}

double Single_Process::KPTerms(
    const ATOOLS::QCD_Variation_Params * varparams)
{
  double KP(KPTerms(0, varparams->p_pdf1, varparams->p_pdf2,
                    varparams->m_muF2fac) * m_lastflux);
  return KP;
}

double Single_Process::KPTerms
(int mode, PDF::PDF_Base *pdfa, PDF::PDF_Base *pdfb, double scalefac2)
{
  THROW(fatal_error,"Virtual function not reimplemented.");
  return 0.;
}

double Single_Process::MuR2(
    const ATOOLS::QCD_Variation_Params& varparams,
    Single_Process::BornLikeReweightingInfo & info) const
{
  double mu2new(info.m_muR2 * varparams.m_muR2fac);
  double showermu2fac {1.0};
  if (varparams.m_showermuR2enabled) showermu2fac = varparams.m_muR2fac;
  if ((showermu2fac != 1.0) && (m_pinfo.m_ckkw & 1)) {
    ATOOLS::ClusterAmplitude_Vector &ampls = info.m_ampls;
    if (ampls.size()) {
      // go through cluster sequence
      double alphasnewproduct(1.0);
      double oqcdsum(0.0);
      double minmu2(1.0);
      Cluster_Amplitude *ampl(ampls.front());
      for (; ampl->Next(); ampl = ampl->Next()) {
        if (m_pinfo.Has(nlo_type::real) && ampl->Prev() == NULL) {
          continue;
        }
        double oqcd(ampl->OrderQCD() - ampl->Next()->OrderQCD());
        if (oqcd > 0.0) {
          double mu2(Max(ampl->Mu2(), MODEL::as->CutQ2()));
          mu2 = Min(mu2, sqr(rpa->gen.Ecms()));
          const double mu2new(mu2 * showermu2fac);
          minmu2 = Min(minmu2, mu2new);
          const double alphasnew(varparams.p_alphas->BoundedAlphaS(mu2new));
          alphasnewproduct *= pow(alphasnew, oqcd);
          oqcdsum += oqcd;
        }
      }
      const double oqcdremainder(ampl->OrderQCD() -
                                 (m_pinfo.Has(nlo_type::vsub) ?
                                  1 : 0));
      if (oqcdremainder) {
        const double mu2(Max(ampl->Mu2(), MODEL::as->CutQ2()));
        const double mu2new(mu2 * showermu2fac);
        minmu2 = Min(minmu2, mu2new);
        const double alphasnew(varparams.p_alphas->BoundedAlphaS(mu2new));
        alphasnewproduct *= pow(alphasnew, oqcdremainder);
        oqcdsum += oqcdremainder;
      }
      if (oqcdsum) {
        // solve for new mu2
        const double alphasnewaverage(pow(alphasnewproduct, 1.0 / oqcdsum));
        const double maxmu2(showermu2fac * 1.01 * sqr(rpa->gen.Ecms()));
        mu2new = MODEL::as->WDBSolve(alphasnewaverage, minmu2, maxmu2);
      }
    }
  }
  return mu2new;
}

double Single_Process::AlphaSRatio(
    double mur2old, double mur2new, MODEL::Running_AlphaS * asnew)
{
  const double alphasnew((*asnew)(mur2new));
  const double alphasold((*MODEL::as)(mur2old));
  return alphasnew / alphasold;
}

bool Single_Process::CalculateTotalXSec(const std::string &resultpath,
                                        const bool create)
{
  p_int->Reset();
  auto psh = p_int->PSHandler();
  if (p_int->ISR()) {
    if (m_nin==2) {
      if (m_flavs[0].Mass()!=p_int->ISR()->Flav(0).Mass() ||
          m_flavs[1].Mass()!=p_int->ISR()->Flav(1).Mass()) {
        p_int->ISR()->SetPartonMasses(m_flavs);
      }
    }
  }
  psh->CreateIntegrators();
  psh->InitCuts();
  p_int->SetResultPath(resultpath);
  p_int->ReadResults();
  exh->AddTerminatorObject(p_int);
  double var(p_int->TotalVar());
  msg_Info()<<METHOD<<"(): Calculate xs for '"<<Name()<<"' ("
            <<(p_gen?p_gen->Name():"")<<")"<<std::endl;
  double totalxs(psh->Integrate()/rpa->Picobarn());
  if (!IsEqual(totalxs,p_int->TotalResult())) {
    msg_Error()<<"Result of PS-Integrator and summation do not coincide!\n"
               <<"  '"<<m_name<<"' : "<<totalxs<<" vs. "
               <<p_int->TotalResult()<<std::endl;
  }
  if (p_int->Points()) {
    p_int->SetTotal();
    if (var==p_int->TotalVar()) {
      exh->RemoveTerminatorObject(p_int);
      return 1;
    }
    p_int->StoreResults();
    exh->RemoveTerminatorObject(p_int);
    return 1;
  }
  exh->RemoveTerminatorObject(p_int);
  return 0;
}

void Single_Process::SetScale(const Scale_Setter_Arguments &args)
{
  if (IsMapped()) return;
  Scale_Setter_Arguments cargs(args);
  cargs.p_proc=this;
  cargs.p_cpls=&m_cpls;
  p_scale = Scale_Setter_Base::Scale_Getter_Function::
    GetObject(m_pinfo.m_scale=cargs.m_scale,cargs);
  if (p_scale==NULL) THROW(fatal_error,"Invalid scale scheme");
}

void Single_Process::SetKFactor(const KFactor_Setter_Arguments &args)
{
  if (IsMapped()) return;
  KFactor_Setter_Arguments cargs(args);
  cargs.p_proc=this;
  m_pinfo.m_kfactor=cargs.m_kfac;
  p_kfactor = KFactor_Setter_Base::KFactor_Getter_Function::
    GetObject(m_pinfo.m_kfactor=cargs.m_kfac,cargs);
  if (p_kfactor==NULL) THROW(fatal_error,"Invalid kfactor scheme");
}

void Single_Process::InitializeTheReweighting(ATOOLS::Variations_Mode mode)
{
  if (mode == Variations_Mode::nominal_only) return;
  // Parse settings for hard process variation generators; note that this can
  // not be done in the ctor, because the matrix element handler has not yet
  // fully configured the process at this point, and some variation generators
  // might require that (an example would be EWSud)
  Settings& s = Settings::GetMainSettings();
  for (auto varitem : s["VARIATIONS"].GetItems()) {
    if (varitem.IsScalar()) {
      const auto name = varitem.Get<std::string>();
      if (name == "None") {
        return;
      } else {
        m_hard_process_variation_generators.push_back(
            Hard_Process_Variation_Generator_Getter_Function::
              GetObject(name, Hard_Process_Variation_Generator_Arguments{this})
            );
        if (m_hard_process_variation_generators.back() == nullptr)
          THROW(fatal_error, "Variation generator \"" + name + "\" not found");
      }
    }
  }
}

void Single_Process::SetLookUp(const bool lookup)
{
  m_lookup=lookup;
}

bool Single_Process::Combinable(const size_t &idi,const size_t &idj)
{
  THROW(not_implemented, "To be implemented by child classes");
  return false;
}

const Flavour_Vector &Single_Process::
CombinedFlavour(const size_t &idij)
{
  THROW(not_implemented, "To be implemented by child classes");
  static Flavour_Vector fls(1,kf_none);
  return fls;
}

ATOOLS::Flavour Single_Process::ReMap
(const ATOOLS::Flavour &fl,const size_t &id) const
{
  return fl;
}

Cluster_Amplitude *Single_Process::Cluster
(const Vec4D_Vector &p,const size_t &mode)
{
  if (!(mode&1)) {
    MCatNLO_Process *mp(dynamic_cast<MCatNLO_Process*>(Parent()));
    if (mp) {
      Cluster_Amplitude *ampl(mp->GetAmplitude());
      return ampl;
    }
  }
  NLO_subevtlist *subs(GetRSSubevtList());
  if (subs==NULL) subs=GetSubevtList();
  if (subs && subs->back()->p_ampl) return subs->back()->p_ampl->CopyAll();
  ClusterAmplitude_Vector &ampls(ScaleSetter(1)->Amplitudes());
  if (ampls.size()) {
    msg_Debugging()<<METHOD<<"(): use cluster amplitudes from scale setter\n";
    for (Cluster_Amplitude *ampl(ampls.front());ampl;ampl=ampl->Next())
      msg_Debugging()<<*ampl<<"\n";
    return ampls.front()->CopyAll();
  }
  Cluster_Amplitude *ampl(Cluster_Amplitude::New());
  ampl->SetNIn(m_nin);
  ampl->SetOrderQCD(m_maxcpl[0]);
  ampl->SetOrderEW(m_maxcpl[1]);
  ampl->SetProc(this);
  ampl->SetMS(p_gen);
  ampl->SetProcs(p_apmap);
  if (p_int->ColorIntegrator()==NULL) {
    for (size_t i(0);i<m_nin+m_nout;++i)
      ampl->CreateLeg(i<m_nin?-p_int->Momenta()[i]:p_int->Momenta()[i],
                      i<m_nin?m_flavs[i].Bar():m_flavs[i]);
    // assign a random colour flow to the strongly interacting legs
    std::vector<int> tids, atids;
    for (size_t i(0);i<ampl->Legs().size();++i)
      if (ampl->Leg(i)->Flav().StrongCharge()>0) {
        tids.push_back(i);
        if (ampl->Leg(i)->Flav().StrongCharge()==8) atids.push_back(i);
        else ampl->Leg(i)->SetCol(ColorID(ampl->Leg(i)->Col().m_i,0));
      }
      else if (ampl->Leg(i)->Flav().StrongCharge()<0) {
        ampl->Leg(i)->SetCol(ColorID(0,ampl->Leg(i)->Col().m_j));
        atids.push_back(i);
      }
      else {
        ampl->Leg(i)->SetCol(ColorID(0,0));
      }
    while (true) {
      std::shuffle(atids.begin(),atids.end(),*ran);
      size_t i(0);
      for (;i<tids.size();++i) if (atids[i]==tids[i]) break;
      if (i==tids.size()) break;
    }
    for (size_t i(0);i<tids.size();++i) {
      size_t cl(Flow::Counter());
      ampl->Leg(tids[i])->SetCol(ColorID(cl,ampl->Leg(tids[i])->Col().m_j));
      ampl->Leg(atids[i])->SetCol(ColorID(ampl->Leg(atids[i])->Col().m_i,cl));
    }
  }
  else {
    Int_Vector ci(p_int->ColorIntegrator()->I());
    Int_Vector cj(p_int->ColorIntegrator()->J());
    for (size_t i(0);i<m_nin+m_nout;++i)
      ampl->CreateLeg(i<m_nin?-p_int->Momenta()[i]:p_int->Momenta()[i],
                      i<m_nin?m_flavs[i].Bar():m_flavs[i],
                      ColorID(ci[i],cj[i]));
  }
  ampl->SetMuF2(ScaleSetter(1)->Scale(stp::fac));
  ampl->SetMuR2(ScaleSetter(1)->Scale(stp::ren));
  ampl->SetMuQ2(ScaleSetter(1)->Scale(stp::res));
  return ampl;
}
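
// Minimal, self-contained sketch (not part of the original SHERPA source):
// it only illustrates the telescoping identity quoted in the PDF-ratio
// book-keeping comment in AddISR above,
//   wn_ext * prod_{i=0}^{N-1} wn_i/wd_i = [prod_{i=1}^{N} wn_i/wd_{i-1}] * wn_0,
// with wn_N = wn_ext and wn_0 = wn_core.  All numbers below are made up, and
// the guard macro and function name are hypothetical; the sketch is compiled
// only if SINGLE_PROCESS_PDF_RATIO_SKETCH is defined.
#ifdef SINGLE_PROCESS_PDF_RATIO_SKETCH
#include <cassert>
#include <cmath>
#include <vector>
static void PDFRatioIdentitySketch()
{
  // per-step numerator/denominator PDF weights, index 0 being the core step
  const std::vector<double> wn {0.8, 0.6, 0.4};
  const std::vector<double> wd {0.7, 0.5, 0.3};
  const double wnext {0.2};  // external PDF weight, i.e. wn_N
  // left-hand side: external weight times all ratios wn_i/wd_i
  double lhs {wnext};
  for (size_t i {0}; i < wn.size(); ++i) lhs *= wn[i] / wd[i];
  // right-hand side: ratios with shifted denominators, times the core weight
  double rhs {wn[0]};
  for (size_t i {1}; i <= wn.size(); ++i)
    rhs *= (i < wn.size() ? wn[i] : wnext) / wd[i - 1];
  // both groupings of the same product must agree up to rounding
  assert(std::abs(lhs - rhs) < 1.0e-12);
}
#endif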