
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  38  import madgraph.iolibs.drawing_eps as draw 
  39  import madgraph.iolibs.files as files 
  40  import madgraph.iolibs.group_subprocs as group_subprocs 
  41  import madgraph.various.banner as banner_mod 
  42  import madgraph.various.misc as misc 
  43  import madgraph.various.q_polynomial as q_polynomial 
  44  import madgraph.iolibs.file_writers as writers 
  45  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  46  import madgraph.iolibs.template_files as template_files 
  47  import madgraph.iolibs.ufo_expression_parsers as parsers 
  48  import madgraph.iolibs.export_v4 as export_v4 
  49  import madgraph.various.diagram_symmetry as diagram_symmetry 
  50  import madgraph.various.process_checks as process_checks 
  51  import madgraph.various.progressbar as pbar 
  52  import madgraph.various.q_polynomial as q_polynomial 
  53  import madgraph.core.color_amp as color_amp 
  54  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  55  import models.check_param_card as check_param_card 
  56  from madgraph.loop.loop_base_objects import LoopDiagram 
  57  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  58   
  59   
  60   
  61  pjoin = os.path.join 
  62   
  63  import aloha.create_aloha as create_aloha 
  64  import models.write_param_card as param_writer 
  65  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  66  from madgraph.iolibs.files import cp, ln, mv 
  67  pjoin = os.path.join 
  68  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  69  logger = logging.getLogger('madgraph.loop_exporter') 
  70   
  71  #=============================================================================== 
  72  # LoopExporterFortran 
  73  #=============================================================================== 
74 -class LoopExporterFortran(object):
75 """ Class to define general helper functions to the different 76 loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both 77 from this class AND from the corresponding ProcessExporterFortran(ME,SA,...). 78 It plays the same role as ProcessExporterFrotran and simply defines here 79 loop-specific helpers functions necessary for all loop exporters. 80 Notice that we do not have LoopExporterFortran inheriting from 81 ProcessExporterFortran but give access to arguments like dir_path and 82 clean using options. This avoids method resolution object ambiguity""" 83 84 default_opt = dict(export_v4.ProcessExporterFortran.default_opt) 85 default_opt.update({'clean': False, 'complex_mass':False, 86 'export_format':'madloop', 'mp':True, 87 'loop_dir':'', 'cuttools_dir':'', 88 'fortran_compiler':'gfortran', 89 'SubProc_prefix': 'P', 90 'output_dependencies': 'external', 91 'compute_color_flows': False, 92 'mode':''}) 93 94 include_names = {'ninja' : 'mninja.mod', 95 'golem' : 'generic_function_1p.mod', 96 'samurai':'msamurai.mod', 97 'collier': 'collier.mod'} 98
99 - def __init__(self, dir_path = "", opt=None):
100 """Initiate the LoopExporterFortran with directory information on where 101 to find all the loop-related source files, like CutTools""" 102 103 104 self.opt = dict(self.default_opt) 105 if opt: 106 self.opt.update(opt) 107 108 self.SubProc_prefix = self.opt['SubProc_prefix'] 109 self.loop_dir = self.opt['loop_dir'] 110 self.cuttools_dir = self.opt['cuttools_dir'] 111 self.fortran_compiler = self.opt['fortran_compiler'] 112 self.dependencies = self.opt['output_dependencies'] 113 self.compute_color_flows = self.opt['compute_color_flows'] 114 115 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
116 117 191
192 - def get_aloha_model(self, model):
193 """ Caches the aloha model created here as an attribute of the loop 194 exporter so that it can later be used in the LoopHelasMatrixElement 195 in the function compute_all_analytic_information for recycling aloha 196 computations across different LoopHelasMatrixElements steered by the 197 same loop exporter. 198 """ 199 if not hasattr(self, 'aloha_model'): 200 self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 201 return self.aloha_model
  202  
  203      #=========================================================================== 
  204      # write the multiple-precision header files 
  205      #=========================================================================== 
206 - def write_mp_files(self, writer_mprec, writer_mpc):
207 """Write the cts_mprec.h and cts_mpc.h""" 208 209 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 210 writer_mprec.writelines(file) 211 212 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 213 file = file.replace('&','') 214 writer_mpc.writelines(file) 215 216 return True
  217  
  218  #=============================================================================== 
  219  # LoopProcessExporterFortranSA 
  220  #=============================================================================== 
  221 -class LoopProcessExporterFortranSA(LoopExporterFortran, 
  222                                     export_v4.ProcessExporterFortranSA): 
223 224 """Class to take care of exporting a set of loop matrix elements in the 225 Fortran format.""" 226 227 template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 228 madloop_makefile_name = 'makefile' 229 230 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 231 style='classic2', color='green', 232 top_frame_char = '=', bottom_frame_char = '=', 233 left_frame_char = '{',right_frame_char = '}', 234 print_frame=True, side_margin = 7, up_margin = 1) 235
236 - def __init__(self, *args, **opts):
  237          super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 
  238          self.unique_id=0 # to allow collier to distinguish the various loop subprocesses 
  239          self.has_loop_induced = False 
240
241 - def copy_template(self, model):
242 """Additional actions needed to setup the Template. 243 """ 244 super(LoopProcessExporterFortranSA, self).copy_template(model) 245 246 self.loop_additional_template_setup()
247
248 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
249 """create the global information for loops""" 250 251 super(LoopProcessExporterFortranSA,self).finalize(matrix_element, 252 cmdhistory, MG5options, outputflag) 253 254 255 MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')) 256 # For loop-induced processes and *only* when summing over all helicity configurations 257 # (which is the default for standalone usage), COLLIER is faster than Ninja. 258 if self.has_loop_induced: 259 MLCard['MLReductionLib'] = "7|6|1" 260 # Computing the poles with COLLIER also unnecessarily slows down the code 261 # It should only be set to True for checks and it's acceptable to remove them 262 # here because for loop-induced processes they should be zero anyway. 263 # We keep it active for non-loop induced processes because COLLIER is not the 264 # main reduction tool in that case, and the poles wouldn't be zero then 265 MLCard['COLLIERComputeUVpoles'] = False 266 MLCard['COLLIERComputeIRpoles'] = False 267 268 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat')) 269 MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
270
271 - def write_f2py_makefile(self):
272 return
273
274 - def write_f2py_check_sa(self, matrix_element, output_path):
275 """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 276 277 # No need to further edit this file for now. 278 file = open(os.path.join(self.template_dir,\ 279 'check_sa_all.py.inc')).read() 280 open(output_path,'w').writelines(file) 281 # Make it executable 282 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
283 284
285 - def write_f2py_splitter(self):
286 """write a function to call the correct matrix element""" 287 288 template = """ 289 %(python_information)s 290 291 SUBROUTINE INITIALISE(PATH) 292 C ROUTINE FOR F2PY to read the benchmark point. 293 IMPLICIT NONE 294 CHARACTER*512 PATH 295 CF2PY INTENT(IN) :: PATH 296 CALL SETPARA(PATH) !first call to setup the paramaters 297 RETURN 298 END 299 300 subroutine CHANGE_PARA(name, value) 301 implicit none 302 CF2PY intent(in) :: name 303 CF2PY intent(in) :: value 304 305 character*512 name 306 double precision value 307 308 include '../Source/MODEL/input.inc' 309 include '../Source/MODEL/coupl.inc' 310 include '../Source/MODEL/mp_coupl.inc' 311 include '../Source/MODEL/mp_input.inc' 312 313 SELECT CASE (name) 314 %(parameter_setup)s 315 CASE DEFAULT 316 write(*,*) 'no parameter matching', name 317 END SELECT 318 319 return 320 end 321 322 subroutine update_all_coup() 323 implicit none 324 call coup() 325 call printout() 326 return 327 end 328 329 330 SUBROUTINE SET_MADLOOP_PATH(PATH) 331 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop 332 CHARACTER(512) PATH 333 CF2PY intent(in)::path 334 CALL SETMADLOOPPATH(PATH) 335 END 336 337 subroutine smatrixhel(pdgs, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE) 338 IMPLICIT NONE 339 340 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 341 CF2PY integer, intent(in), dimension(npdg) :: pdgs 342 CF2PY integer, intent(in) :: npdg 343 CF2PY double precision, intent(out) :: ANS 344 CF2PY integer, intent(out) :: RETURNCODE 345 CF2PY double precision, intent(in) :: ALPHAS 346 CF2PY double precision, intent(in) :: SCALES2 347 348 integer pdgs(*) 349 integer npdg, nhel, RETURNCODE 350 double precision p(*) 351 double precision ANS, ALPHAS, PI,SCALES2 352 353 %(smatrixhel)s 354 355 return 356 end 357 358 subroutine get_pdg_order(OUT) 359 IMPLICIT NONE 360 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i) 361 362 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 363 DATA PDGS/ %(pdgs)s / 364 OUT=PDGS 365 RETURN 366 END 367 368 subroutine get_prefix(PREFIX) 369 IMPLICIT NONE 370 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 371 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 372 DATA PREF / '%(prefix)s'/ 373 PREFIX = PREF 374 RETURN 375 END 376 377 """ 378 379 allids = self.prefix_info.keys() 380 allprefix = [self.prefix_info[key][0] for key in allids] 381 min_nexternal = min([len(ids) for ids in allids]) 382 max_nexternal = max([len(ids) for ids in allids]) 383 384 info = [] 385 for key, (prefix, tag) in self.prefix_info.items(): 386 info.append('#PY %s : %s # %s' % (tag, key, prefix)) 387 388 389 text = [] 390 for n_ext in range(min_nexternal, max_nexternal+1): 391 current = [ids for ids in allids if len(ids)==n_ext] 392 if not current: 393 continue 394 if min_nexternal != max_nexternal: 395 if n_ext == min_nexternal: 396 text.append(' if (npdg.eq.%i)then' % n_ext) 397 else: 398 text.append(' else if (npdg.eq.%i)then' % n_ext) 399 for ii,pdgs in enumerate(current): 400 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 401 if ii==0: 402 text.append( ' if(%s) then ! %i' % (condition, i)) 403 else: 404 text.append( ' else if(%s) then ! 
%i' % (condition,i)) 405 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[pdgs][0]) 406 text.append(' endif') 407 #close the function 408 if min_nexternal != max_nexternal: 409 text.append('endif') 410 411 params = self.get_model_parameter(self.model) 412 parameter_setup =[] 413 for key, var in params.items(): 414 parameter_setup.append(' CASE ("%s")\n %s = value\n MP__%s = value' 415 % (key, var, var)) 416 417 418 419 formatting = {'python_information':'\n'.join(info), 420 'smatrixhel': '\n'.join(text), 421 'maxpart': max_nexternal, 422 'nb_me': len(allids), 423 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0' 424 for i in range(max_nexternal) \ 425 for pdg in allids]), 426 'prefix':'\',\''.join(allprefix), 427 'parameter_setup': '\n'.join(parameter_setup), 428 } 429 430 431 text = template % formatting 432 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 433 fsock.writelines(text) 434 fsock.close()
435 436 437
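The generated smatrixhel dispatcher simply matches the incoming PDG list against every known process and forwards the call to the corresponding prefixed get_me routine. A minimal Python sketch of that lookup, with invented PDG tuples and prefixes purely for illustration:

    # Hypothetical prefix_info as built by the exporter: {pdg_tuple: (proc_prefix, tag)}.
    prefix_info = {(21, 21, 25): ('ML5_0_', 'g g > h'),
                   (2, -2, 23): ('ML5_1_', 'u u~ > z')}

    def dispatch(pdgs):
        """Return the name of the MadLoop routine to call for this PDG configuration."""
        try:
            prefix, _tag = prefix_info[tuple(pdgs)]
        except KeyError:
            raise ValueError('no matrix element matching %s' % (pdgs,))
        return '%sget_me' % prefix

    assert dispatch([21, 21, 25]) == 'ML5_0_get_me'
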
438 - def loop_additional_template_setup(self, copy_Source_makefile = True):
439 """ Perform additional actions specific for this class when setting 440 up the template with the copy_template function.""" 441 442 # We must change some files to their version for NLO computations 443 cpfiles= ["Cards/MadLoopParams.dat", 444 "SubProcesses/MadLoopParamReader.f", 445 "SubProcesses/MadLoopParams.inc"] 446 if copy_Source_makefile: 447 cpfiles.append("Source/makefile") 448 449 for file in cpfiles: 450 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 451 os.path.join(self.dir_path, file)) 452 453 cp(pjoin(self.loop_dir,'StandAlone/Cards/MadLoopParams.dat'), 454 pjoin(self.dir_path, 'Cards/MadLoopParams_default.dat')) 455 456 ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses')) 457 458 # We might need to give a different name to the MadLoop makefile 459 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 460 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 461 462 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 463 # for the non-optimized output 464 link_tir_libs=[] 465 tir_libs=[] 466 467 filePath = pjoin(self.dir_path, 'SubProcesses', 468 'MadLoop_makefile_definitions') 469 calls = self.write_loop_makefile_definitions( 470 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 471 472 # We need minimal editing of MadLoopCommons.f 473 # For the optimized output, this file will be overwritten once the 474 # availability of COLLIER has been determined. 475 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 476 "SubProcesses","MadLoopCommons.inc")).read() 477 writer = writers.FortranWriter(os.path.join(self.dir_path, 478 "SubProcesses","MadLoopCommons.f")) 479 writer.writelines(MadLoopCommon%{ 480 'print_banner_commands':self.MadLoop_banner}, context={ 481 'collier_available':False}) 482 writer.close() 483 484 # Copy the whole MadLoop5_resources directory (empty at this stage) 485 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 486 'MadLoop5_resources')): 487 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 488 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 489 490 # Link relevant cards from Cards inside the MadLoop5_resources 491 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 492 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 493 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 494 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 495 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 496 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 497 498 # And remove check_sa in the SubProcess folder since now there is a 499 # check_sa tailored to each subprocess. 500 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 501 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 502 503 cwd = os.getcwd() 504 dirpath = os.path.join(self.dir_path, 'SubProcesses') 505 try: 506 os.chdir(dirpath) 507 except os.error: 508 logger.error('Could not cd to directory %s' % dirpath) 509 return 0 510 511 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 512 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 513 writers.FortranWriter('cts_mpc.h')) 514 515 # Return to original PWD 516 os.chdir(cwd) 517 518 # We must link the CutTools to the Library folder of the active Template 519 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
  520  
  521      # This function is placed here and not in the optimized exporter, 
  522      # because the same makefile.inc should be used in all cases. 
  523 - def write_loop_makefile_definitions(self, writer, link_tir_libs, 
  524                                        tir_libs,tir_include=[]): 
525 """ Create the file makefile which links to the TIR libraries.""" 526 527 file = open(os.path.join(self.loop_dir,'StandAlone', 528 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 529 replace_dict={} 530 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 531 replace_dict['tir_libs']=' '.join(tir_libs) 532 replace_dict['dotf']='%.f' 533 replace_dict['prefix']= self.SubProc_prefix 534 replace_dict['doto']='%.o' 535 replace_dict['tir_include']=' '.join(tir_include) 536 file=file%replace_dict 537 if writer: 538 writer.writelines(file) 539 else: 540 return file
541
  542 - def convert_model(self, model, wanted_lorentz = [], 
  543                      wanted_couplings = []): 
544 """ Caches the aloha model created here when writing out the aloha 545 fortran subroutine. 546 """ 547 self.get_aloha_model(model) 548 super(LoopProcessExporterFortranSA, self).convert_model(model, 549 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
550
  551 - def get_ME_identifier(self, matrix_element, 
  552                          group_number = None, group_elem_number = None): 
553 """ A function returning a string uniquely identifying the matrix 554 element given in argument so that it can be used as a prefix to all 555 MadLoop5 subroutines and common blocks related to it. This allows 556 to compile several processes into one library as requested by the 557 BLHA (Binoth LesHouches Accord) guidelines. 558 The arguments group_number and proc_id are just for the LoopInduced 559 output with MadEvent.""" 560 561 # When disabling the loop grouping in the LoopInduced MadEvent output, 562 # we have only the group_number set and the proc_id set to None. In this 563 # case we don't print the proc_id. 564 if (not group_number is None) and group_elem_number is None: 565 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 566 group_number) 567 elif group_number is None or group_elem_number is None: 568 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 569 else: 570 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 571 group_number, group_elem_number)
572
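A standalone restatement of the naming scheme above, with invented process and group numbers, useful to see the three cases at a glance:

    def ml5_prefix(proc_id, group_number=None, group_elem_number=None):
        """Mirror of the prefix logic above, for illustration only."""
        if group_number is not None and group_elem_number is None:
            return 'ML5_%d_%s_' % (proc_id, group_number)
        if group_number is None or group_elem_number is None:
            return 'ML5_%d_' % proc_id
        return 'ML5_%d_%s_%s_' % (proc_id, group_number, group_elem_number)

    assert ml5_prefix(2) == 'ML5_2_'
    assert ml5_prefix(2, 3) == 'ML5_2_3_'
    assert ml5_prefix(2, 3, 1) == 'ML5_2_3_1_'
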
  573 - def get_SubProc_folder_name(self, process, 
  574                                group_number = None, group_elem_number = None): 
575 """Returns the name of the SubProcess directory, which can contain 576 the process goup and group element number for the case of loop-induced 577 integration with MadEvent.""" 578 579 # When disabling the loop grouping in the LoopInduced MadEvent output, 580 # we have only the group_number set and the proc_id set to None. In this 581 # case we don't print the proc_id. 582 if not group_number is None and group_elem_number is None: 583 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 584 group_number,process.shell_string(print_id=False)) 585 elif group_number is None or group_elem_number is None: 586 return "%s%s" %(self.SubProc_prefix,process.shell_string()) 587 else: 588 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 589 group_number, group_elem_number,process.shell_string(print_id=False))
  590  
  591      #=========================================================================== 
  592      # Set the compiler to be gfortran for the loop processes. 
  593      #=========================================================================== 
594 - def compiler_choice(self, compiler=export_v4.default_compiler):
595 """ Different daughter classes might want different compilers. 596 Here, the gfortran compiler is used throughout the compilation 597 (mandatory for CutTools written in f90) """ 598 if isinstance(compiler, str): 599 fortran_compiler = compiler 600 compiler = export_v4.default_compiler 601 compiler['fortran'] = fortran_compiler 602 603 if not compiler['fortran'] is None and not \ 604 any([name in compiler['fortran'] for name in \ 605 ['gfortran','ifort']]): 606 logger.info('For loop processes, the compiler must be fortran90'+\ 607 'compatible, like gfortran.') 608 compiler['fortran'] = 'gfortran' 609 self.set_compiler(compiler,True) 610 else: 611 self.set_compiler(compiler) 612 613 self.set_cpp_compiler(compiler['cpp'])
614
615 - def turn_to_mp_calls(self, helas_calls_list):
  616          # Prepend 'MP_' to all the helas calls in helas_calls_list. 
  617          # This might look like a brutally unsafe implementation, but it is not, as 
  618          # these calls are built from the properties of the HELAS objects, and 
  619          # whether they are evaluated in double or quad precision is none of 
  620          # their business but only relevant to the output algorithm. 
  621          # Also, the cast to complex masses DCMPLX(*) must be replaced by 
  622          # CMPLX(*,KIND=16). 
  623          MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 
  624  
  625          def replaceWith(match_obj): 
  626              return match_obj.group('toSub')+'MP_' 
  628          DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 
  629                            re.IGNORECASE | re.MULTILINE) 
  630  
  631          for i, helas_call in enumerate(helas_calls_list): 
  632              new_helas_call=MP.sub(replaceWith,helas_call) 
  633              helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 
  634                                             new_helas_call) 
635 639 647
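As a standalone illustration of the two substitutions above (the HELAS call below is invented for the example):

    import re

    MP = re.compile(r"(?P<toSub>^.*CALL\s+)", re.IGNORECASE | re.MULTILINE)
    DCMPLX = re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)", re.IGNORECASE | re.MULTILINE)

    call = 'CALL FFV1L_1(W(1,1),W(1,2),GC_5,DCMPLX(MDL_MT),ZERO,W(1,5))'
    call = MP.sub(lambda m: m.group('toSub') + 'MP_', call)
    call = DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)", call)
    # call is now 'CALL MP_FFV1L_1(W(1,1),W(1,2),GC_5,CMPLX(MDL_MT,KIND=16),ZERO,W(1,5))'
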
648 - def make(self):
649 """ Compiles the additional dependences for loop (such as CutTools).""" 650 super(LoopProcessExporterFortranSA, self).make() 651 652 # make CutTools (only necessary with MG option output_dependencies='internal') 653 libdir = os.path.join(self.dir_path,'lib') 654 sourcedir = os.path.join(self.dir_path,'Source') 655 if self.dependencies=='internal': 656 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 657 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 658 if os.path.exists(pjoin(sourcedir,'CutTools')): 659 logger.info('Compiling CutTools (can take a couple of minutes) ...') 660 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 661 logger.info(' ...done.') 662 else: 663 raise MadGraph5Error('Could not compile CutTools because its'+\ 664 ' source directory could not be found in the SOURCE folder.') 665 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 666 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 667 raise MadGraph5Error('CutTools compilation failed.') 668 669 # Verify compatibility between current compiler and the one which was 670 # used when last compiling CutTools (if specified). 671 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 672 libdir, 'libcts.a')))),'compiler_version.log') 673 if os.path.exists(compiler_log_path): 674 compiler_version_used = open(compiler_log_path,'r').read() 675 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 676 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 677 if os.path.exists(pjoin(sourcedir,'CutTools')): 678 logger.info('CutTools was compiled with a different fortran'+\ 679 ' compiler. Re-compiling it now...') 680 misc.compile(['cleanCT'], cwd = sourcedir) 681 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 682 logger.info(' ...done.') 683 else: 684 raise MadGraph5Error("CutTools installation in %s"\ 685 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 686 " seems to have been compiled with a different compiler than"+\ 687 " the one specified in MG5_aMC. Please recompile CutTools.")
688
689 - def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
690 """Concatenate the coefficient information to reduce it to 691 (fraction, is_imaginary) """ 692 693 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 694 695 return (total_coeff, is_imaginary)
696
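For instance, a contribution with ff_number=2, frac=1/3, Nc_power=2 and the default Nc_value=3 collapses to the single fraction 6; a minimal sketch of that arithmetic:

    import fractions

    ff_number, frac, Nc_power, Nc_value = 2, fractions.Fraction(1, 3), 2, 3
    total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power
    assert total_coeff == fractions.Fraction(6)  # cat_coeff would return (Fraction(6, 1), is_imaginary)
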
697 - def get_amp_to_jamp_map(self, col_amps, n_amps):
698 """ Returns a list with element 'i' being a list of tuples corresponding 699 to all apparition of amplitude number 'i' in the jamp number 'j' 700 with coeff 'coeff_j'. The format of each tuple describing an apparition 701 is (j, coeff_j). where coeff_j is of the form (Fraction, is_imag).""" 702 703 if(isinstance(col_amps,list)): 704 if(col_amps and isinstance(col_amps[0],list)): 705 color_amplitudes=col_amps 706 else: 707 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 708 else: 709 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 710 711 # To store the result 712 res_list = [[] for i in range(n_amps)] 713 for i, coeff_list in enumerate(color_amplitudes): 714 for (coefficient, amp_number) in coeff_list: 715 res_list[amp_number-1].append((i,self.cat_coeff(\ 716 coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 717 718 return res_list
719
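A minimal sketch of this inversion with a hand-made col_amps structure, where each coefficient packs (ff_number, frac, is_imag, Nc_power) as assumed by cat_coeff above (values invented):

    import fractions

    # jamp 0 contains amplitude 1; jamp 1 contains amplitudes 1 and 2.
    col_amps = [[((1, fractions.Fraction(1, 3), False, 0), 1)],
                [((1, fractions.Fraction(1, 1), False, 1), 1),
                 ((-1, fractions.Fraction(1, 9), True, 2), 2)]]
    # The resulting map, indexed by amplitude number - 1, would read:
    #   res_list[0] = [(0, (Fraction(1, 3), False)), (1, (Fraction(3, 1), False))]
    #   res_list[1] = [(1, (Fraction(-1, 1), True))]
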
720 - def get_color_matrix(self, matrix_element):
721 """Return the color matrix definition lines. This color matrix is of size 722 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 723 amplitude.""" 724 725 logger.info('Computing diagram color coefficients') 726 727 # The two lists have a list of tuples at element 'i' which correspond 728 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 729 # with coeff 'coeffj'. The format of each tuple describing an apparition 730 # is (j, coeffj). 731 ampl_to_jampl=self.get_amp_to_jamp_map(\ 732 matrix_element.get_loop_color_amplitudes(), 733 matrix_element.get_number_of_loop_amplitudes()) 734 if matrix_element.get('processes')[0].get('has_born'): 735 ampb_to_jampb=self.get_amp_to_jamp_map(\ 736 matrix_element.get_born_color_amplitudes(), 737 matrix_element.get_number_of_born_amplitudes()) 738 else: 739 ampb_to_jampb=ampl_to_jampl 740 # Below is the original color matrix multiplying the JAMPS 741 if matrix_element.get('color_matrix'): 742 ColorMatrixDenom = \ 743 matrix_element.get('color_matrix').get_line_denominators() 744 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 745 get_line_numerators(index, denominator) for 746 (index, denominator) in enumerate(ColorMatrixDenom) ] 747 else: 748 ColorMatrixDenom= [1] 749 ColorMatrixNum = [[1]] 750 751 # Below is the final color matrix output 752 ColorMatrixNumOutput=[] 753 ColorMatrixDenomOutput=[] 754 755 # Now we construct the color factors between each born and loop amplitude 756 # by scanning their contributions to the different jamps. 757 start = time.time() 758 progress_bar = None 759 time_info = False 760 for i, jampl_list in enumerate(ampl_to_jampl): 761 # This can be pretty long for processes with many color flows. 762 # So, if necessary (i.e. for more than 15s), we tell the user the 763 # estimated time for the processing. 764 if i==5: 765 elapsed_time = time.time()-start 766 t = len(ampl_to_jampl)*(elapsed_time/5.0) 767 if t > 10.0: 768 time_info = True 769 logger.info('The color factors computation will take '+\ 770 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 771 'Started on %s.'%datetime.datetime.now().strftime(\ 772 "%d-%m-%Y %H:%M")) 773 if logger.getEffectiveLevel()<logging.WARNING: 774 widgets = ['Color computation:', pbar.Percentage(), ' ', 775 pbar.Bar(),' ', pbar.ETA(), ' '] 776 progress_bar = pbar.ProgressBar(widgets=widgets, 777 maxval=len(ampl_to_jampl), fd=sys.stdout) 778 779 if not progress_bar is None: 780 progress_bar.update(i+1) 781 # Flush to force the printout of the progress_bar to be updated 782 sys.stdout.flush() 783 784 line_num=[] 785 line_denom=[] 786 787 # Treat the special case where this specific amplitude contributes to no 788 # color flow at all. So it is zero because of color but not even due to 789 # an accidental cancellation among color flows, but simply because of its 790 # projection to each individual color flow is zero. In such case, the 791 # corresponding jampl_list is empty and all color coefficients must then 792 # be zero. This happens for example in the Higgs Effective Theory model 793 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
794 if len(jampl_list)==0: 795 line_num=[0]*len(ampb_to_jampb) 796 line_denom=[1]*len(ampb_to_jampb) 797 ColorMatrixNumOutput.append(line_num) 798 ColorMatrixDenomOutput.append(line_denom) 799 continue 800 801 for jampb_list in ampb_to_jampb: 802 real_num=0 803 imag_num=0 804 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 805 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 806 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 807 itertools.product(jampl_list,jampb_list)]) 808 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 809 itertools.product(jampl_list,jampb_list): 810 # take the numerator and multiply by lcm/denominator 811 # as we will later divide by the lcm. 812 buff_num=ampl_coeff[0].numerator*\ 813 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 814 abs(common_denom)/(ampl_coeff[0].denominator*\ 815 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 816 # Remember that we must take the complex conjugate of 817 # the born jamp color coefficient because we will compute 818 # the square with 2 Re(LoopAmp x BornAmp*) 819 if ampl_coeff[1] and ampb_coeff[1]: 820 real_num=real_num+buff_num 821 elif not ampl_coeff[1] and not ampb_coeff[1]: 822 real_num=real_num+buff_num 823 elif not ampl_coeff[1] and ampb_coeff[1]: 824 imag_num=imag_num-buff_num 825 else: 826 imag_num=imag_num+buff_num 827 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 828 "color matrix element which has both a real and imaginary part." 829 if imag_num!=0: 830 res=fractions.Fraction(imag_num,common_denom) 831 line_num.append(res.numerator) 832 # Negative denominator means imaginary color coef of the 833 # final color matrix 834 line_denom.append(res.denominator*-1) 835 else: 836 res=fractions.Fraction(real_num,common_denom) 837 line_num.append(res.numerator) 838 # Positive denominator means real color coef of the final color matrix 839 line_denom.append(res.denominator) 840 841 ColorMatrixNumOutput.append(line_num) 842 ColorMatrixDenomOutput.append(line_denom) 843 844 if time_info: 845 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 846 "%d-%m-%Y %H:%M")) 847 if progress_bar!=None: 848 progress_bar.finish() 849 850 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
851
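For the common all-real case, the lcm bookkeeping above reduces to a plain product of fractions; a small sketch with invented numbers:

    import fractions

    # Loop-jamp coefficient 1/3, born-jamp coefficient 1/2, color-matrix entry 9/3.
    res = fractions.Fraction(1, 3) * fractions.Fraction(1, 2) * fractions.Fraction(9, 3)
    # Stored as numerator 1 over denominator 2; the sign of the stored denominator
    # flags a real (positive) or imaginary (negative) coefficient, as in the code above.
    assert (res.numerator, res.denominator) == (1, 2)
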
852 - def get_context(self,matrix_element):
853 """ Returns the contextual variables which need to be set when 854 pre-processing the template files.""" 855 856 # The nSquaredSO entry of the general replace dictionary should have 857 # been set in write_loopmatrix prior to the first call to this function 858 # However, for cases where the TIRCaching contextual variable is 859 # irrelevant (like in the default output), this might not be the case 860 # so we set it to 1. 861 try: 862 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 863 except (KeyError, AttributeError): 864 n_squared_split_orders = 1 865 866 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 867 self.has_loop_induced = max(LoopInduced, self.has_loop_induced) 868 # Force the computation of loop color flows for loop_induced processes 869 ComputeColorFlows = self.compute_color_flows or LoopInduced 870 # The variable AmplitudeReduction is just to make the contextual 871 # conditions more readable in the include files. 872 AmplitudeReduction = LoopInduced or ComputeColorFlows 873 # Even when not reducing at the amplitude level, the TIR caching 874 # is useful when there is more than one squared split order config. 875 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 876 MadEventOutput = False 877 return {'LoopInduced': LoopInduced, 878 'ComputeColorFlows': ComputeColorFlows, 879 'AmplitudeReduction': AmplitudeReduction, 880 'TIRCaching': TIRCaching, 881 'MadEventOutput': MadEventOutput}
  882  
  883  
  884      #=========================================================================== 
  885      # generate_subprocess_directory 
  886      #=========================================================================== 
887 - def generate_loop_subprocess(self, matrix_element, fortran_model, 888 group_number = None, proc_id = None, config_map=None, unique_id=None):
889 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 890 including the necessary loop_matrix.f, born_matrix.f and include files. 891 Notice that this is too different from generate_subprocess_directory 892 so that there is no point reusing this mother function. 893 The 'group_number' and 'proc_id' options are only used for the LoopInduced 894 MadEvent output and only to specify the ME_identifier and the P* 895 SubProcess directory name.""" 896 897 cwd = os.getcwd() 898 proc_dir_name = self.get_SubProc_folder_name( 899 matrix_element.get('processes')[0],group_number,proc_id) 900 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 901 902 try: 903 os.mkdir(dirpath) 904 except os.error as error: 905 logger.warning(error.strerror + " " + dirpath) 906 907 try: 908 os.chdir(dirpath) 909 except os.error: 910 logger.error('Could not cd to directory %s' % dirpath) 911 return 0 912 913 logger.info('Creating files in directory %s' % dirpath) 914 915 if unique_id is None: 916 raise MadGraph5Error, 'A unique id must be provided to the function'+\ 917 'generate_loop_subprocess of LoopProcessExporterFortranSA.' 918 # Create an include with the unique consecutive ID assigned 919 open('unique_id.inc','w').write( 920 """ integer UNIQUE_ID 921 parameter(UNIQUE_ID=%d)"""%unique_id) 922 923 # Extract number of external particles 924 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 925 926 calls=self.write_loop_matrix_element_v4(None,matrix_element, 927 fortran_model, group_number = group_number, 928 proc_id = proc_id, config_map = config_map) 929 930 # We assume here that all processes must share the same property of 931 # having a born or not, which must be true anyway since these are two 932 # definite different classes of processes which can never be treated on 933 # the same footing. 934 if matrix_element.get('processes')[0].get('has_born'): 935 filename = 'born_matrix.f' 936 calls = self.write_bornmatrix( 937 writers.FortranWriter(filename), 938 matrix_element, 939 fortran_model) 940 941 filename = 'pmass.inc' 942 self.write_pmass_file(writers.FortranWriter(filename), 943 matrix_element) 944 945 filename = 'ngraphs.inc' 946 self.write_ngraphs_file(writers.FortranWriter(filename), 947 len(matrix_element.get_all_amplitudes())) 948 949 # Do not draw the loop diagrams if they are too many. 
950 # The user can always decide to do it manually, if really needed 951 loop_diags = [loop_diag for loop_diag in\ 952 matrix_element.get('base_amplitude').get('loop_diagrams')\ 953 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 954 if len(loop_diags)>5000: 955 logger.info("There are more than 5000 loop diagrams."+\ 956 "Only the first 5000 are drawn.") 957 filename = "loop_matrix.ps" 958 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 959 loop_diags[:5000]),filename, 960 model=matrix_element.get('processes')[0].get('model'),amplitude='') 961 logger.info("Drawing loop Feynman diagrams for " + \ 962 matrix_element.get('processes')[0].nice_string()) 963 plot.draw() 964 965 if matrix_element.get('processes')[0].get('has_born'): 966 filename = "born_matrix.ps" 967 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 968 get('born_diagrams'), 969 filename, 970 model=matrix_element.get('processes')[0].\ 971 get('model'), 972 amplitude='') 973 logger.info("Generating born Feynman diagrams for " + \ 974 matrix_element.get('processes')[0].nice_string(\ 975 print_weighted=False)) 976 plot.draw() 977 978 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 979 matrix_element.get('processes')[0],group_number,proc_id)) 980 981 # Return to original PWD 982 os.chdir(cwd) 983 984 if not calls: 985 calls = 0 986 return calls
987 1008
1009 - def generate_general_replace_dict(self,matrix_element, 1010 group_number = None, proc_id = None):
1011 """Generates the entries for the general replacement dictionary used 1012 for the different output codes for this exporter.The arguments 1013 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 1014 1015 dict={} 1016 # A general process prefix which appears in front of all MadLooop 1017 # subroutines and common block so that several processes can be compiled 1018 # together into one library, as necessary to follow BLHA guidelines. 1019 1020 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 1021 group_number = group_number, group_elem_number = proc_id) 1022 1023 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1024 for proc in matrix_element.get('processes'): 1025 ids = [l.get('id') for l in proc.get('legs_with_decays')] 1026 self.prefix_info[tuple(ids)] = [dict['proc_prefix'], proc.get_tag()] 1027 1028 # The proc_id is used for MadEvent grouping, so none of our concern here 1029 # and it is simply set to an empty string. 1030 dict['proc_id'] = '' 1031 # Extract version number and date from VERSION file 1032 info_lines = self.get_mg5_info_lines() 1033 dict['info_lines'] = info_lines 1034 # Extract process info lines 1035 process_lines = self.get_process_info_lines(matrix_element) 1036 dict['process_lines'] = process_lines 1037 # Extract number of external particles 1038 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1039 dict['nexternal'] = nexternal 1040 dict['nincoming'] = ninitial 1041 # Extract ncomb 1042 ncomb = matrix_element.get_helicity_combinations() 1043 dict['ncomb'] = ncomb 1044 # Extract nloopamps 1045 nloopamps = matrix_element.get_number_of_loop_amplitudes() 1046 dict['nloopamps'] = nloopamps 1047 # Extract nloopdiags 1048 nloopdiags = len(matrix_element.get('diagrams')) 1049 dict['nloopdiags'] = nloopdiags 1050 # Extract nctamps 1051 nctamps = matrix_element.get_number_of_CT_amplitudes() 1052 dict['nctamps'] = nctamps 1053 # Extract nwavefuncs 1054 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 1055 dict['nwavefuncs'] = nwavefuncs 1056 # Set format of the double precision 1057 dict['real_dp_format']='real*8' 1058 dict['real_mp_format']='real*16' 1059 # Set format of the complex 1060 dict['complex_dp_format']='complex*16' 1061 dict['complex_mp_format']='complex*32' 1062 # Set format of the masses 1063 dict['mass_dp_format'] = dict['complex_dp_format'] 1064 dict['mass_mp_format'] = dict['complex_mp_format'] 1065 # Fill in default values for the placeholders for the madevent 1066 # loop-induced output 1067 dict['nmultichannels'] = 0 1068 dict['nmultichannel_configs'] = 0 1069 dict['config_map_definition'] = '' 1070 dict['config_index_map_definition'] = '' 1071 # Color matrix size 1072 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 1073 # it is NLOOPAMPSxNBORNAMPS 1074 # Also, how to access the number of Born squared order contributions 1075 1076 if matrix_element.get('processes')[0].get('has_born'): 1077 dict['color_matrix_size'] = 'nbornamps' 1078 dict['get_nsqso_born']=\ 1079 "include 'nsqso_born.inc'" 1080 else: 1081 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 1082 PARAMETER (NSQSO_BORN=0) 1083 """ 1084 dict['color_matrix_size'] = 'nloopamps' 1085 1086 # These placeholders help to have as many common templates for the 1087 # output of the loop induced processes and those with a born 1088 # contribution. 
1089 if matrix_element.get('processes')[0].get('has_born'): 1090 # Extract nbornamps 1091 nbornamps = matrix_element.get_number_of_born_amplitudes() 1092 dict['nbornamps'] = nbornamps 1093 dict['ncomb_helas_objs'] = ',ncomb' 1094 dict['nbornamps_decl'] = \ 1095 """INTEGER NBORNAMPS 1096 PARAMETER (NBORNAMPS=%d)"""%nbornamps 1097 dict['nBornAmps'] = nbornamps 1098 1099 else: 1100 dict['ncomb_helas_objs'] = '' 1101 dict['dp_born_amps_decl'] = '' 1102 dict['dp_born_amps_decl_in_mp'] = '' 1103 dict['copy_mp_to_dp_born_amps'] = '' 1104 dict['mp_born_amps_decl'] = '' 1105 dict['nbornamps_decl'] = '' 1106 dict['nbornamps'] = 0 1107 dict['nBornAmps'] = 0 1108 1109 return dict
1110
1111 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 1112 group_number = None, proc_id = None, config_map = None):
1113 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 1114 mp_born_amps_and_wfs. 1115 The arguments group_number and proc_id are just for the LoopInduced 1116 output with MadEvent and only used in get_ME_identifier. 1117 """ 1118 1119 # Create the necessary files for the loop matrix element subroutine 1120 1121 if config_map: 1122 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 1123 'MadEvent and cannot compute the AMP2 for multi-channeling.' 1124 1125 if not isinstance(fortran_model,\ 1126 helas_call_writers.FortranUFOHelasCallWriter): 1127 raise MadGraph5Error, 'The loop fortran output can only'+\ 1128 ' work with a UFO Fortran model' 1129 1130 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 1131 argument=fortran_model.get('model'), 1132 hel_sum=matrix_element.get('processes')[0].get('has_born')) 1133 1134 # Compute the analytical information of the loop wavefunctions in the 1135 # loop helas matrix elements using the cached aloha model to reuse 1136 # as much as possible the aloha computations already performed for 1137 # writing out the aloha fortran subroutines. 1138 matrix_element.compute_all_analytic_information( 1139 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1140 1141 # Initialize a general replacement dictionary with entries common to 1142 # many files generated here. 1143 matrix_element.rep_dict = self.generate_general_replace_dict( 1144 matrix_element, group_number = group_number, proc_id = proc_id) 1145 1146 # Extract max number of loop couplings (specific to this output type) 1147 matrix_element.rep_dict['maxlcouplings']= \ 1148 matrix_element.find_max_loop_coupling() 1149 # The born amp declaration suited for also outputing the loop-induced 1150 # processes as well. 1151 if matrix_element.get('processes')[0].get('has_born'): 1152 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 1153 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 1154 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 1155 matrix_element.rep_dict['dp_born_amps_decl'] = \ 1156 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1157 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1158 matrix_element.rep_dict['mp_born_amps_decl'] = \ 1159 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1160 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1161 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 1162 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 1163 1164 if writer: 1165 raise MadGraph5Error, 'Matrix output mode no longer supported.' 
1166 1167 filename = 'loop_matrix.f' 1168 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1169 matrix_element, 1170 LoopFortranModel) 1171 1172 # Write out the proc_prefix in a file, this is quite handy 1173 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1174 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1175 proc_prefix_writer.close() 1176 1177 filename = 'check_sa.f' 1178 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1179 1180 filename = 'CT_interface.f' 1181 self.write_CT_interface(writers.FortranWriter(filename),\ 1182 matrix_element) 1183 1184 1185 1186 filename = 'improve_ps.f' 1187 calls = self.write_improve_ps(writers.FortranWriter(filename), 1188 matrix_element) 1189 1190 filename = 'loop_num.f' 1191 self.write_loop_num(writers.FortranWriter(filename),\ 1192 matrix_element,LoopFortranModel) 1193 1194 filename = 'mp_born_amps_and_wfs.f' 1195 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1196 matrix_element,LoopFortranModel) 1197 1198 # Extract number of external particles 1199 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1200 filename = 'nexternal.inc' 1201 self.write_nexternal_file(writers.FortranWriter(filename), 1202 nexternal, ninitial) 1203 1204 filename = 'process_info.inc' 1205 self.write_process_info_file(writers.FortranWriter(filename), 1206 matrix_element) 1207 return calls
1208
1209 - def write_process_info_file(self, writer, matrix_element):
1210 """A small structural function to write the include file specifying some 1211 process characteristics.""" 1212 1213 model = matrix_element.get('processes')[0].get('model') 1214 process_info = {} 1215 # The maximum spin of any particle connected (or directly running in) 1216 # any loop of this matrix element. This is important because there is 1217 # some limitation in the stability tests that can be performed when this 1218 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1219 # this regard. 1220 process_info['max_spin_connected_to_loop']=\ 1221 matrix_element.get_max_spin_connected_to_loop() 1222 1223 process_info['max_spin_external_particle']= max( 1224 model.get_particle(l.get('id')).get('spin') for l in 1225 matrix_element.get('processes')[0].get('legs')) 1226 1227 proc_include = \ 1228 """ 1229 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1230 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1231 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1232 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1233 """%process_info 1234 1235 writer.writelines(proc_include)
1236
1237 - def generate_subprocess_directory(self, matrix_element, fortran_model):
1238 """ To overload the default name for this function such that the correct 1239 function is used when called from the command interface """ 1240 1241 self.unique_id +=1 1242 return self.generate_loop_subprocess(matrix_element,fortran_model, 1243 unique_id=self.unique_id)
1244
1245 - def write_check_sa(self, writer, matrix_element):
1246 """Writes out the steering code check_sa. In the optimized output mode, 1247 All the necessary entries in the replace_dictionary have already been 1248 set in write_loopmatrix because it is only there that one has access to 1249 the information about split orders.""" 1250 replace_dict = copy.copy(matrix_element.rep_dict) 1251 for key in ['print_so_born_results','print_so_loop_results', 1252 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1253 if key not in replace_dict.keys(): 1254 replace_dict[key]='' 1255 1256 if matrix_element.get('processes')[0].get('has_born'): 1257 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1258 else: 1259 file = open(os.path.join(self.template_dir,\ 1260 'check_sa_loop_induced.inc')).read() 1261 file=file%replace_dict 1262 writer.writelines(file) 1263 1264 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1265 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1266 return 1267 1268 file = open(os.path.join(self.template_dir,\ 1269 'check_py.f.inc')).read() 1270 1271 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 1272 replace_dict['prefix_routine'] = replace_dict['proc_prefix'] 1273 else: 1274 replace_dict['prefix_routine'] = '' 1275 file=file%replace_dict 1276 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1277 new_writer = writer.__class__(new_path, 'w') 1278 new_writer.writelines(file) 1279 1280 file = open(os.path.join(self.template_dir,\ 1281 'check_sa.py.inc')).read() 1282 # For now just put in an empty PS point but in the future, maybe generate 1283 # a valid one already here by default 1284 curr_proc = matrix_element.get('processes')[0] 1285 random_PSpoint_python_formatted = \ 1286 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1287 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1288 1289 process_definition_string = curr_proc.nice_string().replace('Process:','') 1290 file=file.format(random_PSpoint_python_formatted,process_definition_string, 1291 replace_dict['proc_prefix'].lower()) 1292 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1293 new_writer = open(new_path, 'w') 1294 new_writer.writelines(file) 1295 # Make it executable 1296 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1297
1298 - def write_improve_ps(self, writer, matrix_element):
1299 """ Write out the improve_ps subroutines which modify the PS point 1300 given in input and slightly deform it to achieve exact onshellness on 1301 all external particles as well as perfect energy-momentum conservation""" 1302 replace_dict = copy.copy(matrix_element.rep_dict) 1303 1304 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1305 replace_dict['ninitial']=ninitial 1306 mass_list=matrix_element.get_external_masses()[:-2] 1307 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1308 1309 # Write the quadruple precision version of this routine only. 1310 replace_dict['real_format']=replace_dict['real_mp_format'] 1311 replace_dict['mp_prefix']='MP_' 1312 replace_dict['exp_letter']='e' 1313 replace_dict['mp_specifier']='_16' 1314 replace_dict['coupl_inc_name']='mp_coupl.inc' 1315 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1316 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1317 i, m in enumerate(mass_list)]) 1318 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1319 file_mp=file_mp%replace_dict 1320 # 1321 writer.writelines(file_mp)
1322
1323 - def write_loop_num(self, writer, matrix_element,fortran_model):
1324 """ Create the file containing the core subroutine called by CutTools 1325 which contains the Helas calls building the loop""" 1326 1327 if not matrix_element.get('processes') or \ 1328 not matrix_element.get('diagrams'): 1329 return 0 1330 1331 # Set lowercase/uppercase Fortran code 1332 writers.FortranWriter.downcase = False 1333 1334 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1335 1336 replace_dict = copy.copy(matrix_element.rep_dict) 1337 1338 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1339 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1340 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1341 1342 # The squaring is only necessary for the processes with born where the 1343 # sum over helicities is done before sending the numerator to CT. 1344 dp_squaring_lines=['DO I=1,NBORNAMPS', 1345 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1346 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1347 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1348 mp_squaring_lines=['DO I=1,NBORNAMPS', 1349 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1350 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1351 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1352 if matrix_element.get('processes')[0].get('has_born'): 1353 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1354 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1355 else: 1356 replace_dict['dp_squaring']='RES=BUFF' 1357 replace_dict['mp_squaring']='QPRES=BUFF' 1358 1359 # Prepend MP_ to all helas calls. 1360 self.turn_to_mp_calls(loop_helas_calls) 1361 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1362 1363 file=file%replace_dict 1364 1365 if writer: 1366 writer.writelines(file) 1367 else: 1368 return file
1369
1370 - def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1371 """ Create the file CT_interface.f which contains the subroutine defining 1372 the loop HELAS-like calls along with the general interfacing subroutine. 1373 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1374 1375 files=[] 1376 1377 # First write CT_interface which interfaces MG5 with CutTools. 1378 replace_dict=copy.copy(matrix_element.rep_dict) 1379 1380 # We finalize CT result differently wether we used the built-in 1381 # squaring against the born. 1382 if matrix_element.get('processes')[0].get('has_born'): 1383 replace_dict['finalize_CT']='\n'.join([\ 1384 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1385 else: 1386 replace_dict['finalize_CT']='\n'.join([\ 1387 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1388 1389 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1390 1391 file = file % replace_dict 1392 files.append(file) 1393 1394 # Now collect the different kind of subroutines needed for the 1395 # loop HELAS-like calls. 1396 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1397 1398 for callkey in HelasLoopAmpsCallKeys: 1399 replace_dict=copy.copy(matrix_element.rep_dict) 1400 # Add to this dictionary all other attribute common to all 1401 # HELAS-like loop subroutines. 1402 if matrix_element.get('processes')[0].get('has_born'): 1403 replace_dict['validh_or_nothing']=',validh' 1404 else: 1405 replace_dict['validh_or_nothing']='' 1406 # In the optimized output, the number of couplings in the loop is 1407 # not specified so we only treat it here if necessary: 1408 if len(callkey)>2: 1409 replace_dict['ncplsargs']=callkey[2] 1410 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1411 replace_dict['cplsargs']=cplsargs 1412 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1413 replace_dict['cplsdecl']=cplsdecl 1414 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1415 replace_dict['mp_cplsdecl']=mp_cplsdecl 1416 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1417 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1418 for i in range(1,callkey[2]+1)]) 1419 replace_dict['cplset']=cplset 1420 1421 replace_dict['nloopline']=callkey[0] 1422 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1423 replace_dict['wfsargs']=wfsargs 1424 # We don't pass the multiple precision mass in the optimized_output 1425 if not optimized_output: 1426 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1427 else: 1428 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1429 replace_dict['margs']=margs 1430 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1431 replace_dict['wfsargsdecl']=wfsargsdecl 1432 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1433 replace_dict['margsdecl']=margsdecl 1434 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1435 replace_dict['mp_margsdecl']=mp_margsdecl 1436 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1437 i in range(1,callkey[1]+1)]) 1438 replace_dict['weset']=weset 1439 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1440 replace_dict['weset']=weset 1441 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1442 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1443 i in range(2,callkey[0]+1)]) 1444 replace_dict['mset']=mset 1445 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1446 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1447 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1448 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1449 i in range(3,callkey[0]+3)]) 1450 replace_dict['mset2']=mset2 1451 replace_dict['nwfsargs'] = callkey[1] 1452 if callkey[0]==callkey[1]: 1453 replace_dict['nwfsargs_header'] = "" 1454 replace_dict['pairingargs']="" 1455 replace_dict['pairingdecl']="" 1456 pairingset="""DO I=1,NLOOPLINE 1457 PAIRING(I)=1 1458 ENDDO 1459 """ 1460 replace_dict['pairingset']=pairingset 1461 else: 1462 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1463 pairingargs="".join([("P"+str(i)+", ") for i in \ 1464 range(1,callkey[0]+1)]) 1465 replace_dict['pairingargs']=pairingargs 1466 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1467 range(1,callkey[0]+1)])[:-2] 1468 replace_dict['pairingdecl']=pairingdecl 1469 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1470 i in range(1,callkey[0]+1)]) 1471 replace_dict['pairingset']=pairingset 1472 1473 file = open(os.path.join(self.template_dir,\ 1474 'helas_loop_amplitude.inc')).read() 1475 file = file % replace_dict 1476 files.append(file) 1477 1478 file="\n".join(files) 1479 1480 if writer: 1481 writer.writelines(file,context=self.get_context(matrix_element)) 1482 else: 1483 return file
1484 1485 # Helper function to split HELAS CALLS in dedicated subroutines placed 1486 # in different files.
1487 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1488 helas_calls, entry_name, bunch_name,n_helas=2000, 1489 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1490 continue_label = 1000, momenta_array_name='P', 1491 context={}):
1492          """ Finish the code generation with splitting.
1493          Split the helas calls in the argument helas_calls into bunches of
1494          size n_helas and place them in dedicated subroutines with names
1495          <bunch_name>_i. Also setup the corresponding calls to these subroutines
1496          in the replace_dict dictionary under the entry entry_name.
1497          The context specified will be forwarded to the fileWriter."""
1498          helascalls_replace_dict=copy.copy(replace_dict)
1499          helascalls_replace_dict['bunch_name']=bunch_name
1500          helascalls_files=[]
1501          for i, k in enumerate(range(0, len(helas_calls), n_helas)):
1502              helascalls_replace_dict['bunch_number']=i+1
1503              helascalls_replace_dict['helas_calls']=\
1504                  '\n'.join(helas_calls[k:k + n_helas])
1505              helascalls_replace_dict['required_so_broadcaster']=\
1506                  required_so_broadcaster
1507              helascalls_replace_dict['continue_label']=continue_label
1508              new_helascalls_file = open(os.path.join(self.template_dir,\
1509                  template_name)).read()
1510              new_helascalls_file = new_helascalls_file % helascalls_replace_dict
1511              helascalls_files.append(new_helascalls_file)
1512          # Setup the call to these HELASCALLS subroutines in loop_matrix.f
1513          helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\
1514              (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \
1515              for a in range(len(helascalls_files))]
1516          replace_dict[entry_name]='\n'.join(helascalls_calls)
1517          if writer:
1518              for i, helascalls_file in enumerate(helascalls_files):
1519                  filename = '%s_%d.f'%(bunch_name,i+1)
1520                  writers.FortranWriter(filename).writelines(helascalls_file,
1521                      context=context)
1522          else:
1523              masterfile='\n'.join([masterfile,]+helascalls_files)
1524
1525          return masterfile
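# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): split_HELASCALLS above
# simply chunks a long list of HELAS call lines into bunches of at most
# n_helas entries and emits one 'CALL <prefix><bunch_name>_<i>(...)' per
# bunch. A minimal stand-alone rendition of that chunking logic, with
# hypothetical names:
def chunk_helas_calls(helas_calls, bunch_name, proc_prefix='', n_helas=2000,
                      momenta_array_name='P'):
    bunches = [helas_calls[k:k + n_helas]
               for k in range(0, len(helas_calls), n_helas)]
    calls = ["CALL %s%s_%d(%s,NHEL,H,IC)" %
             (proc_prefix, bunch_name, i + 1, momenta_array_name)
             for i in range(len(bunches))]
    return bunches, calls
# Example: 4500 calls with the default bunch size yield 3 subroutines and
#   calls[0] == 'CALL HELAS_CALLS_AMPB_1(P,NHEL,H,IC)'
#   bunches, calls = chunk_helas_calls(['CALL FFV1_0(...)'] * 4500,
#                                      'HELAS_CALLS_AMPB')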
1526
1527 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1528 noSplit=False):
1529 """Create the loop_matrix.f file.""" 1530 1531 if not matrix_element.get('processes') or \ 1532 not matrix_element.get('diagrams'): 1533 return 0 1534 1535 # Set lowercase/uppercase Fortran code 1536 1537 writers.FortranWriter.downcase = False 1538 1539 replace_dict = copy.copy(matrix_element.rep_dict) 1540 1541 # Extract overall denominator 1542 # Averaging initial state color, spin, and identical FS particles 1543 den_factor_line = self.get_den_factor_line(matrix_element) 1544 replace_dict['den_factor_line'] = den_factor_line 1545 # When the user asks for the polarized matrix element we must 1546 # multiply back by the helicity averaging factor 1547 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1548 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 1549 matrix_element.get_beams_hel_avg_factor() 1550 1551 # These entries are specific for the output for loop-induced processes 1552 # Also sets here the details of the squaring of the loop ampltiudes 1553 # with the born or the loop ones. 1554 if not matrix_element.get('processes')[0].get('has_born'): 1555 replace_dict['compute_born']=\ 1556 """C There is of course no born for loop induced processes 1557 ANS(0)=0.0d0 1558 """ 1559 replace_dict['set_reference']='\n'.join([ 1560 'C For loop-induced, the reference for comparison is set later'+\ 1561 ' from the total contribution of the previous PS point considered.', 1562 'C But you can edit here the value to be used for the first PS point.', 1563 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1564 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1565 replace_dict['loop_induced_setup'] = '\n'.join([ 1566 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1567 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1568 replace_dict['loop_induced_finalize'] = \ 1569 ("""DO I=NCTAMPS+1,NLOOPAMPS 1570 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1571 WRITE(*,*) '##W03 WARNING Contribution ',I 1572 WRITE(*,*) ' is unstable for helicity ',H 1573 ENDIF 1574 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1575 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1576 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1577 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1578 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1579 C ENDIF 1580 ENDDO 1581 1227 CONTINUE 1582 HELPICKED=HELPICKED_BU""")%replace_dict 1583 replace_dict['loop_helas_calls']="" 1584 replace_dict['nctamps_or_nloopamps']='nloopamps' 1585 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1586 replace_dict['squaring']=\ 1587 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1588 IF (J.EQ.1) THEN 1589 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1590 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1591 ENDIF""" 1592 else: 1593 replace_dict['compute_born']=\ 1594 """C Compute the born, for a specific helicity if asked so. 
1595 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1596 """%matrix_element.rep_dict 1597 replace_dict['set_reference']=\ 1598 """C We chose to use the born evaluation for the reference 1599 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1600 replace_dict['loop_induced_helas_calls'] = "" 1601 replace_dict['loop_induced_finalize'] = "" 1602 replace_dict['loop_induced_setup'] = "" 1603 replace_dict['nctamps_or_nloopamps']='nctamps' 1604 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1605 replace_dict['squaring']='\n'.join(['DO K=1,3', 1606 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1607 'ENDDO']) 1608 1609 # Write a dummy nsquaredSO.inc which is used in the default 1610 # loop_matrix.f code (even though it does not support split orders evals) 1611 # just to comply with the syntax expected from the external code using MadLoop. 1612 writers.FortranWriter('nsquaredSO.inc').writelines( 1613 """INTEGER NSQUAREDSO 1614 PARAMETER (NSQUAREDSO=0)""") 1615 1616 # Actualize results from the loops computed. Only necessary for 1617 # processes with a born. 1618 actualize_ans=[] 1619 if matrix_element.get('processes')[0].get('has_born'): 1620 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1621 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1622 in range(1,4)) 1623 actualize_ans.append(\ 1624 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1625 actualize_ans.append(\ 1626 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1627 actualize_ans.extend(["ENDIF","ENDDO"]) 1628 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1629 else: 1630 replace_dict['actualize_ans']=\ 1631 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1632 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1633 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1634 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1635 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1636 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1637 C ENDIF""")%replace_dict 1638 1639 # Write out the color matrix 1640 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1641 CMWriter=open(pjoin('..','MadLoop5_resources', 1642 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1643 for ColorLine in CMNum: 1644 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1645 CMWriter.close() 1646 CMWriter=open(pjoin('..','MadLoop5_resources', 1647 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1648 for ColorLine in CMDenom: 1649 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1650 CMWriter.close() 1651 1652 # Write out the helicity configurations 1653 HelConfigs=matrix_element.get_helicity_matrix() 1654 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1655 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1656 for HelConfig in HelConfigs: 1657 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1658 HelConfigWriter.close() 1659 1660 # Extract helas calls 1661 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1662 matrix_element) 1663 # The proc_prefix must be replaced 1664 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1665 for lc in loop_amp_helas_calls] 1666 1667 born_ct_helas_calls, UVCT_helas_calls = \ 1668 fortran_model.get_born_ct_helas_calls(matrix_element) 1669 # In the default output, we do not need to separate these two kind of 1670 # contributions 1671 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1672 file = open(os.path.join(self.template_dir,\ 1673 1674 'loop_matrix_standalone.inc')).read() 1675 1676 if matrix_element.get('processes')[0].get('has_born'): 1677 toBeRepaced='loop_helas_calls' 1678 else: 1679 toBeRepaced='loop_induced_helas_calls' 1680 1681 # Decide here wether we need to split the loop_matrix.f file or not. 1682 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1683 file=self.split_HELASCALLS(writer,replace_dict,\ 1684 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1685 'born_ct_helas_calls','helas_calls_ampb') 1686 file=self.split_HELASCALLS(writer,replace_dict,\ 1687 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1688 toBeRepaced,'helas_calls_ampl') 1689 else: 1690 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1691 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1692 1693 file = file % replace_dict 1694 1695 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1696 n_loop_calls = len(filter(lambda call: 1697 not loop_calls_finder.match(call) is None, loop_amp_helas_calls)) 1698 if writer: 1699 # Write the file 1700 writer.writelines(file) 1701 return n_loop_calls 1702 else: 1703 # Return it to be written along with the others 1704 return n_loop_calls, file
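# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): write_loopmatrix above only
# moves the HELAS calls into auxiliary bunch files when the process is large
# enough. A stand-alone restatement of that decision, using the same
# 1000-amplitude threshold as the default output:
def must_split_helas_calls(n_amplitudes, no_split=False, threshold=1000):
    """Return True when the HELAS calls should go to separate bunch files."""
    return (not no_split) and n_amplitudes > threshold
# Example: must_split_helas_calls(1500) -> True
#          must_split_helas_calls(1500, no_split=True) -> False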
1705
1706 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1707          """Create the born_matrix.f file for the born process as for a standard
1708          tree-level computation."""
1709
1710          if not matrix_element.get('processes') or \
1711             not matrix_element.get('diagrams'):
1712              return 0
1713
1714          if not isinstance(writer, writers.FortranWriter):
1715              raise writers.FortranWriter.FortranWriterError(\
1716                  "writer not FortranWriter")
1717
1718          # For now, we can use the exact same treatment as for tree-level
1719          # computations by redefining here a regular HelasMatrixElement for the
1720          # born process.
1721          # It is important to make a deepcopy, as we don't want any possible
1722          # treatment on the objects of the bornME to have side effects on
1723          # the content of the LoopHelasMatrixElement object.
1724          bornME = helas_objects.HelasMatrixElement()
1725          for prop in bornME.keys():
1726              bornME.set(prop,copy.deepcopy(matrix_element.get(prop)))
1727          bornME.set('base_amplitude',None,force=True)
1728          bornME.set('diagrams',copy.deepcopy(\
1729              matrix_element.get_born_diagrams()))
1730          bornME.set('color_basis',copy.deepcopy(\
1731              matrix_element.get('born_color_basis')))
1732          bornME.set('color_matrix',copy.deepcopy(\
1733              color_amp.ColorMatrix(bornME.get('color_basis'))))
1734          # This is to decide whether or not to reuse old wavefunctions to store
1735          # new ones (provided they are not used further in the code).
1736          bornME.optimization = True
1737          return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4(
1738              writer, bornME, fortran_model,
1739              proc_prefix=matrix_element.rep_dict['proc_prefix'])
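# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): write_bornmatrix builds a
# plain tree-level matrix element by deep-copying every property of the
# loop matrix element and then overriding the diagrams and color data with
# their born counterparts. The same pattern on a generic dict-like object,
# with hypothetical keys:
import copy

def clone_with_overrides(source, overrides):
    """Deep-copy all entries of 'source', then replace the ones in 'overrides'."""
    clone = dict((key, copy.deepcopy(value)) for key, value in source.items())
    clone.update(copy.deepcopy(overrides))
    return clone
# Example:
#   born = clone_with_overrides(loop_me_properties,
#              {'diagrams': born_diagrams, 'color_basis': born_color_basis})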
1740
1741 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1742 noSplit=False):
1743 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1744 computes just the external wavefunction and born amplitudes in 1745 multiple precision. """ 1746 1747 if not matrix_element.get('processes') or \ 1748 not matrix_element.get('diagrams'): 1749 return 0 1750 1751 replace_dict = copy.copy(matrix_element.rep_dict) 1752 1753 # For the wavefunction copy, check what suffix is needed for the W array 1754 if matrix_element.get('processes')[0].get('has_born'): 1755 replace_dict['h_w_suffix']=',H' 1756 else: 1757 replace_dict['h_w_suffix']='' 1758 1759 # Extract helas calls 1760 born_amps_and_wfs_calls , uvct_amp_calls = \ 1761 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1762 # In the default output, these two kind of contributions do not need to 1763 # be differentiated 1764 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1765 1766 # Turn these HELAS calls to the multiple-precision version of the HELAS 1767 # subroutines. 1768 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1769 1770 file = open(os.path.join(self.template_dir,\ 1771 'mp_born_amps_and_wfs.inc')).read() 1772 # Decide here wether we need to split the loop_matrix.f file or not. 1773 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1774 file=self.split_HELASCALLS(writer,replace_dict,\ 1775 'mp_helas_calls_split.inc',file,\ 1776 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1777 'mp_helas_calls') 1778 else: 1779 replace_dict['born_amps_and_wfs_calls']=\ 1780 '\n'.join(born_amps_and_wfs_calls) 1781 1782 file = file % replace_dict 1783 if writer: 1784 # Write the file 1785 writer.writelines(file) 1786 else: 1787 # Return it to be written along with the others 1788 return file 1789 1790 #=============================================================================== 1791 # LoopProcessOptimizedExporterFortranSA 1792 #=============================================================================== 1793
1794 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1795      """Class to take care of exporting a set of loop matrix elements in the
1796      Fortran format which exploits the Pozzorini method of representing
1797      the loop numerators as polynomials to render their evaluation faster."""
1798
1799      template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
1800      # The option below controls whether one wants to group together in one single
1801      # CutTools/TIR call the loops sharing the same denominator structure
1802      forbid_loop_grouping = False
1803
1804      # List of the potential TIR libraries one wants to link to.
1805      # Golem and Samurai will typically get obtained from gosam_contrib
1806      # which might also contain a version of ninja. We must therefore
1807      # make sure that ninja appears first in the list of -L because
1808      # it is the tool for which the user is most likely to use
1809      # a standalone version independent of gosam_contrib
1810      all_tir=['pjfry','iregi','ninja','golem','samurai','collier']
1811
1812 - def __init__(self, dir_path = "", opt=None):
1813          """Initialize the LoopProcessOptimizedExporterFortranSA with directory
1814          information on where to find all the loop-related source files,
1815          like CutTools and TIR."""
1816
1817          super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt)
1818
1819          # Availability of the TIR libraries
1820          self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True,
1821                                   'samurai':True,'ninja':True,'collier':True}
1822
1823          for tir in self.all_tir:
1824              tir_dir="%s_dir"%tir
1825              if tir_dir in self.opt and self.opt[tir_dir] is not None:
1826                  # Resolve any relative 'local path' with respect to the current MG5aMC root.
1827                  tir_path = self.opt[tir_dir].strip()
1828                  if tir_path.startswith('.'):
1829                      tir_path = os.path.abspath(pjoin(MG5DIR,tir_path))
1830                  setattr(self,tir_dir,tir_path)
1831              else:
1832                  setattr(self,tir_dir,'')
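# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): relative TIR paths given in
# the options above are anchored to the MG5aMC root directory, absolute
# paths are kept as given, and missing entries become the empty string. The
# resolution logic in isolation, with a hypothetical 'mg5_root' argument
# playing the role of MG5DIR:
import os

def resolve_tir_path(tir_path, mg5_root):
    if not tir_path:
        return ''
    tir_path = tir_path.strip()
    if tir_path.startswith('.'):
        return os.path.abspath(os.path.join(mg5_root, tir_path))
    return tir_path
# Example:
#   resolve_tir_path('./HEPTools/collier', '/home/user/MG5_aMC')
#    -> '/home/user/MG5_aMC/HEPTools/collier'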
1833
1834 - def copy_template(self, model):
1835 """Additional actions needed to setup the Template. 1836 """ 1837 1838 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1839 1840 self.loop_optimized_additional_template_setup()
1841
1842 - def get_context(self,matrix_element, **opts):
1843          """ Additional contextual information which needs to be created for
1844          the optimized output."""
1845
1846          context = LoopProcessExporterFortranSA.get_context(self, matrix_element,
1847                                                             **opts)
1848
1849          # Check whether the selected Ninja installation supports quadruple precision
1850          try:
1851              context['ninja_supports_quad_prec'] = \
1852                  misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir'))
1853          except AttributeError:
1854              context['ninja_supports_quad_prec'] = False
1855
1856          for tir in self.all_tir:
1857              context['%s_available'%tir]=self.tir_available_dict[tir]
1858              # safety check
1859              if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']:
1860                  raise MadGraph5Error, "%s is not a currently interfaced TIR."%tir
1861
1862          return context
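# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): the contextual dictionary
# returned by get_context is a plain mapping of feature flags later consumed
# by the Fortran writers. A hypothetical minimal builder for the TIR-related
# entries:
def build_tir_context(tir_available_dict, ninja_supports_quad_prec=False):
    context = {'ninja_supports_quad_prec': ninja_supports_quad_prec}
    for tir, available in tir_available_dict.items():
        context['%s_available' % tir] = bool(available)
    return context
# Example:
#   build_tir_context({'collier': True, 'ninja': False})
#    -> {'ninja_supports_quad_prec': False,
#        'collier_available': True, 'ninja_available': False}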
1863
1864 - def loop_optimized_additional_template_setup(self):
1865 """ Perform additional actions specific for this class when setting 1866 up the template with the copy_template function.""" 1867 1868 # We must link the TIR to the Library folder of the active Template 1869 link_tir_libs=[] 1870 tir_libs=[] 1871 tir_include=[] 1872 1873 for tir in self.all_tir: 1874 tir_dir="%s_dir"%tir 1875 libpath=getattr(self,tir_dir) 1876 libname="lib%s.a"%tir 1877 tir_name=tir 1878 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1879 libpath,libname,tir_name=tir_name) 1880 if libpath != "": 1881 if tir in ['ninja','pjfry','golem','samurai','collier']: 1882 # It is cleaner to use the original location of the libraries 1883 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1884 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1885 # For Ninja, we must also link against OneLoop. 1886 if tir in ['ninja']: 1887 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1888 for ext in ['a','dylib','so']): 1889 raise MadGraph5Error( 1890 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath) 1891 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1892 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1893 if tir in ['ninja','golem', 'samurai','collier']: 1894 trgt_path = pjoin(os.path.dirname(libpath),'include') 1895 if os.path.isdir(trgt_path): 1896 to_include = misc.find_includes_path(trgt_path, 1897 self.include_names[tir]) 1898 else: 1899 to_include = None 1900 # Special possible location for collier 1901 if to_include is None and tir=='collier': 1902 to_include = misc.find_includes_path( 1903 pjoin(libpath,'modules'),self.include_names[tir]) 1904 if to_include is None: 1905 logger.error( 1906 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1907 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1908 to_include = '<Not_found_define_it_yourself>' 1909 tir_include.append('-I %s'%str(to_include)) 1910 # To be able to easily compile a MadLoop library using 1911 # makefiles built outside of the MG5_aMC framework 1912 # (such as what is done with the Sherpa interface), we 1913 # place here an easy handle on the golem includes 1914 name_map = {'golem':'golem95','samurai':'samurai', 1915 'ninja':'ninja','collier':'collier'} 1916 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1917 name='%s_include'%name_map[tir],abspath=True) 1918 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1919 name='%s_lib'%name_map[tir],abspath=True) 1920 else : 1921 link_tir_libs.append('-l%s'%tir) 1922 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1923 1924 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1925 'MadLoop_makefile_definitions') 1926 if os.path.isfile(MadLoop_makefile_definitions): 1927 os.remove(MadLoop_makefile_definitions) 1928 1929 calls = self.write_loop_makefile_definitions( 1930 writers.MakefileWriter(MadLoop_makefile_definitions), 1931 link_tir_libs,tir_libs, tir_include=tir_include) 1932 1933 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1934 # COLLIER. 
1935 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1936 "SubProcesses","MadLoopCommons.inc")).read() 1937 writer = writers.FortranWriter(os.path.join(self.dir_path, 1938 "SubProcesses","MadLoopCommons.f")) 1939 writer.writelines(MadLoopCommon%{ 1940 'print_banner_commands':self.MadLoop_banner}, context={ 1941 'collier_available':self.tir_available_dict['collier']}) 1942 writer.close()
1943 1955 1956 2084
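# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): for every TIR library that
# loop_optimized_additional_template_setup could locate, it accumulates a
# '-L<path>/ -l<name>' linker flag together with the corresponding static
# library path. A simplified stand-alone rendition of that bookkeeping,
# with a hypothetical 'tir_paths' mapping:
def collect_tir_link_flags(tir_paths):
    """tir_paths: dict mapping a TIR name to its library folder ('' if absent)."""
    link_tir_libs, tir_libs = [], []
    for tir, libpath in sorted(tir_paths.items()):
        if not libpath:
            continue
        link_tir_libs.append('-L%s/ -l%s' % (libpath, tir))
        tir_libs.append('%s/lib%s.$(libext)' % (libpath, tir))
    return link_tir_libs, tir_libs
# Example:
#   collect_tir_link_flags({'ninja': '/opt/ninja/lib', 'golem': ''})
#    -> (['-L/opt/ninja/lib/ -lninja'], ['/opt/ninja/lib/libninja.$(libext)'])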
2085 - def set_group_loops(self, matrix_element):
2086 """ Decides whether we must group loops or not for this matrix element""" 2087 2088 # Decide if loops sharing same denominator structures have to be grouped 2089 # together or not. 2090 if self.forbid_loop_grouping: 2091 self.group_loops = False 2092 else: 2093 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 2094 and matrix_element.get('processes')[0].get('has_born') 2095 2096 return self.group_loops
2097
2098 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2099 """create the global information for loops""" 2100 2101 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 2102 cmdhistory, MG5options, outputflag) 2103 self.write_global_specs(matrix_element)
2104 2105 2106
2107 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 2108 group_number = None, proc_id = None, config_map = None):
2109 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 2110 and loop_num.f only but with the optimized FortranModel. 2111 The arguments group_number and proc_id are just for the LoopInduced 2112 output with MadEvent and only used in get_ME_identifier.""" 2113 2114 # Warn the user that the 'matrix' output where all relevant code is 2115 # put together in a single file is not supported in this loop output. 2116 if writer: 2117 raise MadGraph5Error, 'Matrix output mode no longer supported.' 2118 2119 if not isinstance(fortran_model,\ 2120 helas_call_writers.FortranUFOHelasCallWriter): 2121 raise MadGraph5Error, 'The optimized loop fortran output can only'+\ 2122 ' work with a UFO Fortran model' 2123 OptimizedFortranModel=\ 2124 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 2125 fortran_model.get('model'),False) 2126 2127 2128 if not matrix_element.get('processes')[0].get('has_born') and \ 2129 not self.compute_color_flows: 2130 logger.debug("Color flows will be employed despite the option"+\ 2131 " 'loop_color_flows' being set to False because it is necessary"+\ 2132 " for optimizations.") 2133 2134 # Compute the analytical information of the loop wavefunctions in the 2135 # loop helas matrix elements using the cached aloha model to reuse 2136 # as much as possible the aloha computations already performed for 2137 # writing out the aloha fortran subroutines. 2138 matrix_element.compute_all_analytic_information( 2139 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 2140 2141 self.set_group_loops(matrix_element) 2142 2143 # Initialize a general replacement dictionary with entries common to 2144 # many files generated here. 2145 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 2146 generate_general_replace_dict(self, matrix_element, 2147 group_number = group_number, proc_id = proc_id) 2148 2149 # and those specific to the optimized output 2150 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 2151 2152 # Create the necessary files for the loop matrix element subroutine 2153 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 2154 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 2155 proc_prefix_writer.close() 2156 2157 filename = 'loop_matrix.f' 2158 calls = self.write_loopmatrix(writers.FortranWriter(filename), 2159 matrix_element, 2160 OptimizedFortranModel) 2161 2162 filename = 'check_sa.f' 2163 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 2164 2165 filename = 'polynomial.f' 2166 calls = self.write_polynomial_subroutines( 2167 writers.FortranWriter(filename), 2168 matrix_element) 2169 2170 filename = 'improve_ps.f' 2171 calls = self.write_improve_ps(writers.FortranWriter(filename), 2172 matrix_element) 2173 2174 filename = 'CT_interface.f' 2175 self.write_CT_interface(writers.FortranWriter(filename),\ 2176 matrix_element) 2177 2178 filename = 'TIR_interface.f' 2179 self.write_TIR_interface(writers.FortranWriter(filename), 2180 matrix_element) 2181 2182 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 2183 filename = 'GOLEM_interface.f' 2184 self.write_GOLEM_interface(writers.FortranWriter(filename), 2185 matrix_element) 2186 2187 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']: 2188 filename = 'COLLIER_interface.f' 2189 self.write_COLLIER_interface(writers.FortranWriter(filename), 2190 matrix_element) 2191 2192 filename = 'loop_num.f' 2193 self.write_loop_num(writers.FortranWriter(filename),\ 2194 
matrix_element,OptimizedFortranModel) 2195 2196 filename = 'mp_compute_loop_coefs.f' 2197 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 2198 matrix_element,OptimizedFortranModel) 2199 2200 if self.get_context(matrix_element)['ComputeColorFlows']: 2201 filename = 'compute_color_flows.f' 2202 self.write_compute_color_flows(writers.FortranWriter(filename), 2203 matrix_element, config_map = config_map) 2204 2205 # Extract number of external particles 2206 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2207 filename = 'nexternal.inc' 2208 self.write_nexternal_file(writers.FortranWriter(filename), 2209 nexternal, ninitial) 2210 2211 # Write general process information 2212 filename = 'process_info.inc' 2213 self.write_process_info_file(writers.FortranWriter(filename), 2214 matrix_element) 2215 2216 if self.get_context(matrix_element)['TIRCaching']: 2217 filename = 'tir_cache_size.inc' 2218 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 2219 2220 return calls
2221
2222 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
2223 """ Specify the entries of the replacement dictionary which are specific 2224 to the optimized output and only relevant to it (the more general entries 2225 are set in the the mother class LoopProcessExporterFortranSA.""" 2226 2227 max_loop_rank=matrix_element.get_max_loop_rank() 2228 matrix_element.rep_dict['maxrank']=max_loop_rank 2229 matrix_element.rep_dict['loop_max_coefs']=\ 2230 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank) 2231 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank() 2232 matrix_element.rep_dict['vertex_max_coefs']=\ 2233 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank) 2234 2235 matrix_element.rep_dict['nloopwavefuncs']=\ 2236 matrix_element.get_number_of_loop_wavefunctions() 2237 max_spin=matrix_element.get_max_loop_particle_spin() 2238 2239 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16 2240 matrix_element.rep_dict['nloops']=len(\ 2241 [1 for ldiag in matrix_element.get_loop_diagrams() for \ 2242 lamp in ldiag.get_loop_amplitudes()]) 2243 2244 if self.set_group_loops(matrix_element): 2245 matrix_element.rep_dict['nloop_groups']=\ 2246 len(matrix_element.get('loop_groups')) 2247 else: 2248 matrix_element.rep_dict['nloop_groups']=\ 2249 matrix_element.rep_dict['nloops']
2250
2251 - def write_loop_num(self, writer, matrix_element,fortran_model):
2252 """ Create the file containing the core subroutine called by CutTools 2253 which contains the Helas calls building the loop""" 2254 2255 replace_dict=copy.copy(matrix_element.rep_dict) 2256 2257 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2258 file = file % replace_dict 2259 writer.writelines(file,context=self.get_context(matrix_element))
2260
2261 - def write_CT_interface(self, writer, matrix_element):
2262 """ We can re-use the mother one for the loop optimized output.""" 2263 LoopProcessExporterFortranSA.write_CT_interface(\ 2264 self, writer, matrix_element,optimized_output=True)
2265
2266 - def write_TIR_interface(self, writer, matrix_element):
2267 """ Create the file TIR_interface.f which does NOT contain the subroutine 2268 defining the loop HELAS-like calls along with the general interfacing 2269 subroutine. """ 2270 2271 # First write TIR_interface which interfaces MG5 with TIR. 2272 replace_dict=copy.copy(matrix_element.rep_dict) 2273 2274 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 2275 2276 # Check which loops have an Higgs effective vertex so as to correctly 2277 # implement CutTools limitation 2278 loop_groups = matrix_element.get('loop_groups') 2279 has_HEFT_vertex = [False]*len(loop_groups) 2280 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2281 for lamp in loop_amp_list: 2282 final_lwf = lamp.get_final_loop_wavefunction() 2283 while not final_lwf is None: 2284 # We define here an HEFT vertex as any vertex built up from 2285 # only massless vectors and scalars (at least one of each) 2286 scalars = len([1 for wf in final_lwf.get('mothers') if 2287 wf.get('spin')==1]) 2288 vectors = len([1 for wf in final_lwf.get('mothers') if 2289 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2290 if scalars>=1 and vectors>=1 and \ 2291 scalars+vectors == len(final_lwf.get('mothers')): 2292 has_HEFT_vertex[i] = True 2293 break 2294 final_lwf = final_lwf.get_loop_mother() 2295 else: 2296 continue 2297 break 2298 2299 has_HEFT_list = [] 2300 chunk_size = 9 2301 for k in xrange(0, len(has_HEFT_vertex), chunk_size): 2302 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2303 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2304 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2305 has_HEFT_vertex[k:k + chunk_size]]))) 2306 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2307 2308 file = file % replace_dict 2309 2310 FPR = q_polynomial.FortranPolynomialRoutines( 2311 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2312 sub_prefix=replace_dict['proc_prefix']) 2313 if self.tir_available_dict['pjfry']: 2314 file += '\n\n'+FPR.write_pjfry_mapping() 2315 if self.tir_available_dict['iregi']: 2316 file += '\n\n'+FPR.write_iregi_mapping() 2317 2318 if writer: 2319 writer.writelines(file,context=self.get_context(matrix_element)) 2320 else: 2321 return file
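# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): the HAS_AN_HEFT_VERTEX flags
# computed in write_TIR_interface are written out as Fortran DATA statements
# in chunks of 9 entries. The chunking in isolation, assuming the same
# layout as above:
def format_logical_data(flags, array_name='HAS_AN_HEFT_VERTEX', chunk_size=9):
    lines = []
    for k in range(0, len(flags), chunk_size):
        chunk = flags[k:k + chunk_size]
        lines.append("DATA (%s(I),I=%6r,%6r) /%s/" % (
            array_name, k + 1, k + len(chunk),
            ','.join('.TRUE.' if f else '.FALSE.' for f in chunk)))
    return lines
# Example:
#   format_logical_data([True, False, True])
#    -> ['DATA (HAS_AN_HEFT_VERTEX(I),I=     1,     3) /.TRUE.,.FALSE.,.TRUE./']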
2322
2323 - def write_COLLIER_interface(self, writer, matrix_element):
2324 """ Create the file COLLIER_interface.f""" 2325 2326 # First write GOLEM_interface which interfaces MG5 with TIR. 2327 replace_dict=copy.copy(matrix_element.rep_dict) 2328 2329 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read() 2330 2331 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2332 coef_format=replace_dict['complex_dp_format'],\ 2333 sub_prefix=replace_dict['proc_prefix']) 2334 map_definition = [] 2335 collier_map = FPR.get_COLLIER_mapping() 2336 2337 chunk_size = 10 2338 for map_name, indices_list in \ 2339 [('COEFMAP_ZERO',[c[0] for c in collier_map]), 2340 ('COEFMAP_ONE',[c[1] for c in collier_map]), 2341 ('COEFMAP_TWO',[c[2] for c in collier_map]), 2342 ('COEFMAP_THREE',[c[3] for c in collier_map])]: 2343 for k in xrange(0, len(indices_list), chunk_size): 2344 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \ 2345 (map_name,k, min(k + chunk_size, len(indices_list))-1, 2346 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size]))) 2347 2348 replace_dict['collier_coefmap'] = '\n'.join(map_definition) 2349 2350 file = file % replace_dict 2351 2352 if writer: 2353 writer.writelines(file,context=self.get_context(matrix_element)) 2354 else: 2355 return file
2356
2357 - def write_GOLEM_interface(self, writer, matrix_element):
2358 """ Create the file GOLEM_interface.f which does NOT contain the subroutine 2359 defining the loop HELAS-like calls along with the general interfacing 2360 subroutine. """ 2361 2362 # First write GOLEM_interface which interfaces MG5 with TIR. 2363 replace_dict=copy.copy(matrix_element.rep_dict) 2364 2365 # We finalize TIR result differently wether we used the built-in 2366 # squaring against the born. 2367 if not self.get_context(matrix_element)['AmplitudeReduction']: 2368 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 2369 else: 2370 replace_dict['loop_induced_sqsoindex']='' 2371 2372 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 2373 2374 file = file % replace_dict 2375 2376 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2377 coef_format=replace_dict['complex_dp_format'],\ 2378 sub_prefix=replace_dict['proc_prefix']) 2379 2380 file += '\n\n'+FPR.write_golem95_mapping() 2381 2382 if writer: 2383 writer.writelines(file,context=self.get_context(matrix_element)) 2384 else: 2385 return file
2386
2387 - def write_polynomial_subroutines(self,writer,matrix_element):
2388 """ Subroutine to create all the subroutines relevant for handling 2389 the polynomials representing the loop numerator """ 2390 2391 # First create 'loop_max_coefs.inc' 2392 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2393 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2394 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2395 %matrix_element.rep_dict) 2396 2397 # Then coef_specs directly in DHELAS if it does not exist already 2398 # 'coef_specs.inc'. If several processes exported different files there, 2399 # it is fine because the overall maximum value will overwrite it in the 2400 # end 2401 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2402 if not os.path.isfile(coef_specs_path): 2403 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2404 IncWriter.writelines("""INTEGER MAXLWFSIZE 2405 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2406 INTEGER VERTEXMAXCOEFS 2407 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2408 %matrix_element.rep_dict) 2409 IncWriter.close() 2410 2411 # List of all subroutines to place there 2412 subroutines=[] 2413 2414 # Start from the routine in the template 2415 replace_dict = copy.copy(matrix_element.rep_dict) 2416 2417 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2418 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2419 # The double precision version of the basic polynomial routines, such as 2420 # create_loop_coefs 2421 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2422 replace_dict['real_format'] = replace_dict['real_dp_format'] 2423 replace_dict['mp_prefix'] = '' 2424 replace_dict['kind'] = 8 2425 replace_dict['zero_def'] = '0.0d0' 2426 replace_dict['one_def'] = '1.0d0' 2427 dp_routine = dp_routine % replace_dict 2428 # The quadruple precision version of the basic polynomial routines 2429 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2430 replace_dict['real_format'] = replace_dict['real_mp_format'] 2431 replace_dict['mp_prefix'] = 'MP_' 2432 replace_dict['kind'] = 16 2433 replace_dict['zero_def'] = '0.0e0_16' 2434 replace_dict['one_def'] = '1.0e0_16' 2435 mp_routine = mp_routine % replace_dict 2436 subroutines.append(dp_routine) 2437 subroutines.append(mp_routine) 2438 2439 # Initialize the polynomial routine writer 2440 poly_writer=q_polynomial.FortranPolynomialRoutines( 2441 matrix_element.get_max_loop_rank(), 2442 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2443 sub_prefix=replace_dict['proc_prefix'], 2444 proc_prefix=replace_dict['proc_prefix'], 2445 mp_prefix='') 2446 # Write the polynomial constant module common to all 2447 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2448 2449 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2450 matrix_element.get_max_loop_rank(), 2451 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2452 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2453 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2454 # The eval subroutine 2455 subroutines.append(poly_writer.write_polynomial_evaluator()) 2456 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2457 # The add coefs subroutine 2458 subroutines.append(poly_writer.write_add_coefs()) 2459 subroutines.append(mp_poly_writer.write_add_coefs()) 2460 # The merging one for creating the loop coefficients 2461 subroutines.append(poly_writer.write_wl_merger()) 2462 subroutines.append(mp_poly_writer.write_wl_merger()) 2463 for wl_update 
in matrix_element.get_used_wl_updates(): 2464 # We pick here the most appropriate way of computing the 2465 # tensor product depending on the rank of the two tensors. 2466 # The various choices below come out from a careful comparison of 2467 # the different methods using the valgrind profiler 2468 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0: 2469 # If any of the rank is 0, or if they are both equal to 1, 2470 # then we are better off using the full expanded polynomial, 2471 # and let the compiler optimize it. 2472 subroutines.append(poly_writer.write_expanded_wl_updater(\ 2473 wl_update[0],wl_update[1])) 2474 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\ 2475 wl_update[0],wl_update[1])) 2476 elif wl_update[0] >= wl_update[1]: 2477 # If the loop polynomial is larger then we will filter and loop 2478 # over the vertex coefficients first. The smallest product for 2479 # which the routines below could be used is then 2480 # loop_rank_2 x vertex_rank_1 2481 subroutines.append(poly_writer.write_compact_wl_updater(\ 2482 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2483 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2484 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2485 else: 2486 # This happens only when the rank of the updater (vertex coef) 2487 # is larger than the one of the loop coef and none of them is 2488 # zero. This never happens in renormalizable theories but it 2489 # can happen in the HEFT ones or other effective ones. In this 2490 # case the typicaly use of this routine if for the product 2491 # loop_rank_1 x vertex_rank_2 2492 subroutines.append(poly_writer.write_compact_wl_updater(\ 2493 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2494 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2495 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2496 2497 writer.writelines('\n\n'.join(subroutines), 2498 context=self.get_context(matrix_element))
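# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): the choice above between the
# 'expanded' polynomial updater and the two 'compact' variants depends only
# on the ranks of the loop and vertex polynomials. The decision table
# restated as a tiny stand-alone helper:
def select_wl_updater(loop_rank, vertex_rank):
    if loop_rank == vertex_rank == 1 or loop_rank == 0 or vertex_rank == 0:
        # Fully expanded product, left to the compiler to optimize.
        return 'expanded'
    elif loop_rank >= vertex_rank:
        # Filter and loop over the (smaller set of) vertex coefficients first.
        return 'compact_vertex_coefs_first'
    else:
        # Rare case (e.g. HEFT): the vertex polynomial has the higher rank.
        return 'compact_loop_coefs_first'
# Example: select_wl_updater(2, 1) -> 'compact_vertex_coefs_first'
#          select_wl_updater(1, 2) -> 'compact_loop_coefs_first'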
2499
2500 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2501 """Create the write_mp_compute_loop_coefs.f file.""" 2502 2503 if not matrix_element.get('processes') or \ 2504 not matrix_element.get('diagrams'): 2505 return 0 2506 2507 # Set lowercase/uppercase Fortran code 2508 2509 writers.FortranWriter.downcase = False 2510 2511 replace_dict = copy.copy(matrix_element.rep_dict) 2512 2513 # Extract helas calls 2514 squared_orders = matrix_element.get_squared_order_contribs() 2515 split_orders = matrix_element.get('processes')[0].get('split_orders') 2516 2517 born_ct_helas_calls , uvct_helas_calls = \ 2518 fortran_model.get_born_ct_helas_calls(matrix_element, 2519 squared_orders=squared_orders, split_orders=split_orders) 2520 self.turn_to_mp_calls(born_ct_helas_calls) 2521 self.turn_to_mp_calls(uvct_helas_calls) 2522 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2523 matrix_element,group_loops=self.group_loops, 2524 squared_orders=squared_orders,split_orders=split_orders) 2525 # The proc_prefix must be replaced 2526 coef_construction = [c % matrix_element.rep_dict for c 2527 in coef_construction] 2528 self.turn_to_mp_calls(coef_construction) 2529 self.turn_to_mp_calls(coef_merging) 2530 2531 file = open(os.path.join(self.template_dir,\ 2532 'mp_compute_loop_coefs.inc')).read() 2533 2534 # Setup the contextual environment which is used in the splitting 2535 # functions below 2536 context = self.get_context(matrix_element) 2537 file=self.split_HELASCALLS(writer,replace_dict,\ 2538 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2539 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2540 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2541 continue_label = 2000, 2542 momenta_array_name = 'MP_P', 2543 context=context) 2544 file=self.split_HELASCALLS(writer,replace_dict,\ 2545 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2546 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2547 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2548 continue_label = 3000, 2549 momenta_array_name = 'MP_P', 2550 context=context) 2551 file=self.split_HELASCALLS(writer,replace_dict,\ 2552 'mp_helas_calls_split.inc',file,coef_construction,\ 2553 'mp_coef_construction','mp_coef_construction', 2554 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2555 continue_label = 4000, 2556 momenta_array_name = 'MP_P', 2557 context=context) 2558 2559 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2560 2561 file = file % replace_dict 2562 2563 # Write the file 2564 writer.writelines(file,context=context)
2565
2566 - def write_color_matrix_data_file(self, writer, col_matrix):
2567 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2568 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2569 2570 res = [] 2571 for line in range(len(col_matrix._col_basis1)): 2572 numerators = [] 2573 denominators = [] 2574 for row in range(len(col_matrix._col_basis2)): 2575 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2576 numerators.append('%6r'%coeff[0].numerator) 2577 denominators.append('%6r'%( 2578 coeff[0].denominator*(-1 if coeff[1] else 1))) 2579 res.append(' '.join(numerators)) 2580 res.append(' '.join(denominators)) 2581 2582 res.append('EOF') 2583 2584 writer.writelines('\n'.join(res))
2585
2586 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2587 color_basis):
2588 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2589 list of the color_amplitudes in the argument of this function.""" 2590 2591 my_cs = color.ColorString() 2592 2593 res = [] 2594 2595 for jamp_number, coeff_list in enumerate(color_amplitudes): 2596 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2597 # Order the ColorString so that its ordering is canonical. 2598 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2599 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2600 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2601 # A line element is a tuple (numerator, denominator, amplitude_id) 2602 line_element = [] 2603 2604 for (coefficient, amp_number) in coeff_list: 2605 coef = self.cat_coeff(\ 2606 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2607 line_element.append((coef[0].numerator, 2608 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2609 # Sort them by growing amplitude number 2610 line_element.sort(key=lambda el:el[2]) 2611 2612 for i in range(3): 2613 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2614 2615 res.append('EOF') 2616 writer.writelines('\n'.join(res))
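# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): each color flow above is
# written out as three aligned rows (numerators, denominators with the sign
# folded in, amplitude numbers), sorted by amplitude number. The formatting
# step in isolation, on plain (numerator, signed_denominator, amp_id) tuples:
def format_flow_coefficients(line_elements):
    line_elements = sorted(line_elements, key=lambda el: el[2])
    return [' '.join('%6r' % elem[i] for elem in line_elements)
            for i in range(3)]
# Example:
#   format_flow_coefficients([(1, 3, 2), (-1, 6, 1)])
#    -> three rows listing '-1 1', '6 3' and '1 2' (in 6-character columns)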
2617
2618 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2619 """Writes the file compute_color_flows.f which uses the AMPL results 2620 from a common block to project them onto the color flow space so as 2621 to compute the JAMP quantities. For loop induced processes, this file 2622 will also contain a subroutine computing AMPL**2 for madevent 2623 multichanneling.""" 2624 2625 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2626 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2627 2628 dat_writer = open(pjoin('..','MadLoop5_resources', 2629 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2630 %matrix_element.rep_dict),'w') 2631 self.write_color_flow_coefs_data_file(dat_writer, 2632 loop_col_amps, matrix_element.get('loop_color_basis')) 2633 dat_writer.close() 2634 2635 dat_writer = open(pjoin('..','MadLoop5_resources', 2636 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2637 %matrix_element.rep_dict),'w') 2638 self.write_color_matrix_data_file(dat_writer, 2639 matrix_element.get('color_matrix')) 2640 dat_writer.close() 2641 2642 if matrix_element.get('processes')[0].get('has_born'): 2643 born_col_amps = matrix_element.get_born_color_amplitudes() 2644 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2645 dat_writer = open(pjoin('..','MadLoop5_resources', 2646 '%(proc_prefix)sBornColorFlowCoefs.dat' 2647 %matrix_element.rep_dict),'w') 2648 self.write_color_flow_coefs_data_file(dat_writer, 2649 born_col_amps, matrix_element.get('born_color_basis')) 2650 dat_writer.close() 2651 2652 dat_writer = open(pjoin('..','MadLoop5_resources', 2653 '%(proc_prefix)sBornColorFlowMatrix.dat' 2654 %matrix_element.rep_dict),'w') 2655 self.write_color_matrix_data_file(dat_writer, 2656 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2657 dat_writer.close() 2658 else: 2659 matrix_element.rep_dict['nBornFlows'] = 0 2660 2661 replace_dict = copy.copy(matrix_element.rep_dict) 2662 2663 # The following variables only have to be defined for the LoopInduced 2664 # output for madevent. 2665 if self.get_context(matrix_element)['MadEventOutput']: 2666 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2667 else: 2668 replace_dict['config_map_definition'] = '' 2669 replace_dict['config_index_map_definition'] = '' 2670 replace_dict['nmultichannels'] = 0 2671 replace_dict['nmultichannel_configs'] = 0 2672 2673 # The nmultichannels entry will be used in the matrix<i> wrappers as 2674 # well, so we add it to the general_replace_dict too. 2675 matrix_element.rep_dict['nmultichannels'] = \ 2676 replace_dict['nmultichannels'] 2677 matrix_element.rep_dict['nmultichannel_configs'] = \ 2678 replace_dict['nmultichannel_configs'] 2679 2680 2681 file = open(os.path.join(self.template_dir,\ 2682 'compute_color_flows.inc')).read()%replace_dict 2683 2684 writer.writelines(file,context=self.get_context(matrix_element))
2685
2686 - def write_global_specs(self, matrix_element_list, output_path=None):
2687 """ From the list of matrix element, or the single matrix element, derive 2688 the global quantities to write in global_coef_specs.inc""" 2689 2690 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2691 loop_helas_objects.LoopHelasProcess)): 2692 matrix_element_list = matrix_element_list.get_matrix_elements() 2693 2694 if isinstance(matrix_element_list, list): 2695 me_list = matrix_element_list 2696 else: 2697 me_list = [matrix_element_list] 2698 2699 if output_path is None: 2700 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2701 else: 2702 out_path = output_path 2703 2704 open(out_path,'w').write( 2705 """ integer MAXNEXTERNAL 2706 parameter(MAXNEXTERNAL=%d) 2707 integer OVERALLMAXRANK 2708 parameter(OVERALLMAXRANK=%d) 2709 integer NPROCS 2710 parameter(NPROCS=%d)"""%( 2711 max(me.get_nexternal_ninitial()[0] for me in me_list), 2712 max(me.get_max_loop_rank() for me in me_list), 2713 len(me_list)))
2714 2715
2716 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2717 """ If processes with different maximum loop wavefunction size or 2718 different maximum loop vertex rank have to be output together, then 2719 the file 'coef.inc' in the HELAS Source folder must contain the overall 2720 maximum of these quantities. It is not safe though, and the user has 2721 been appropriatly warned at the output stage """ 2722 2723 # Remove the existing link 2724 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\ 2725 'coef_specs.inc') 2726 os.remove(coef_specs_path) 2727 2728 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16} 2729 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin] 2730 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank( 2731 overall_max_loop_vert_rank) 2732 # Replace it by the appropriate value 2733 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2734 IncWriter.writelines("""INTEGER MAXLWFSIZE 2735 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2736 INTEGER VERTEXMAXCOEFS 2737 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2738 %{'max_lwf_size':overall_max_lwf_size, 2739 'vertex_max_coefs':overall_max_loop_vert_coefs}) 2740 IncWriter.close()
2741
2742 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2743 split_orders,squared_orders,amps_orders):
2744 """ Sets up the replacement dictionary for the writeout of the steering 2745 file check_sa.f""" 2746 if len(squared_orders)<1: 2747 matrix_element.rep_dict['print_so_loop_results']=\ 2748 "write(*,*) 'No split orders defined.'" 2749 elif len(squared_orders)==1: 2750 matrix_element.rep_dict['set_coupling_target']='' 2751 matrix_element.rep_dict['print_so_loop_results']=\ 2752 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2753 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2754 for i in range(len(split_orders))])) 2755 else: 2756 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2757 '# Here we leave the default target squared split order to -1, meaning that we'+ 2758 ' aim at computing all individual contributions. You can choose otherwise.', 2759 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2760 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2761 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2762 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2763 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2764 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2765 "ELSE", 2766 "write(*,*) ' > accuracy = NA'", 2767 "ENDIF", 2768 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2769 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2770 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2771 ]) for j, so in enumerate(squared_orders)]) 2772 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2773 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2774 ['\n'.join([ 2775 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2776 ['%d'%so_value for so_value in so])), 2777 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2778 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2779 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2780 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2781 ]) for j, so in enumerate(squared_orders)]) 2782 2783 # We must reconstruct here the born squared orders. 
2784 squared_born_so_orders = [] 2785 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2786 for j in range(0,i+1): 2787 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2788 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2789 if not key in squared_born_so_orders: 2790 squared_born_so_orders.append(key) 2791 if len(squared_born_so_orders)<1: 2792 matrix_element.rep_dict['print_so_born_results'] = '' 2793 elif len(squared_born_so_orders)==1: 2794 matrix_element.rep_dict['print_so_born_results'] = \ 2795 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2796 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2797 for i in range(len(split_orders))])) 2798 else: 2799 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2800 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2801 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2802 for j, so in enumerate(squared_born_so_orders)]) 2803 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2804 ['\n'.join([ 2805 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2806 ['%d'%so_value for so_value in so])), 2807 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2808 ]) for j, so in enumerate(squared_born_so_orders)]) 2809 2810 # Add a bottom bar to both print_so_[loop|born]_results 2811 matrix_element.rep_dict['print_so_born_results'] += \ 2812 '\nwrite (*,*) "---------------------------------"' 2813 matrix_element.rep_dict['print_so_loop_results'] += \ 2814 '\nwrite (*,*) "---------------------------------"'
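# -------------------------------------------------------------------------
# Illustrative sketch (not part of the module): the squared Born split
# orders above are reconstructed by summing, element by element, every pair
# of Born amplitude orders (each amplitude paired with itself as well) and
# keeping the distinct combinations in order of first appearance. The same
# reconstruction on bare order tuples:
def squared_born_orders(born_amp_orders):
    """born_amp_orders: list of tuples of coupling-order powers, one per amplitude."""
    squared = []
    for i, order_i in enumerate(born_amp_orders):
        for order_j in born_amp_orders[:i + 1]:
            key = tuple(a + b for a, b in zip(order_i, order_j))
            if key not in squared:
                squared.append(key)
    return squared
# Example with two Born amplitudes of orders (QCD, QED) = (2, 0) and (0, 2):
#   squared_born_orders([(2, 0), (0, 2)]) -> [(4, 0), (2, 2), (0, 4)]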
2815
2816 - def write_tir_cache_size_include(self, writer):
2817          """Write the file 'tir_cache_size.inc' which sets the size of the TIR
2818          cache that the user wishes to employ and the default value for it.
2819          This can have an impact on MadLoop speed when using stability checks
2820          but also impacts in a non-negligible way MadLoop's memory footprint.
2821          It is therefore important that the user can choose its size."""
2822
2823          # For the standalone optimized output, a size of one is necessary.
2824          # The MadLoop+MadEvent output sets it to 2 because it can gain further
2825          # speed increase with a TIR cache of size 2 due to the structure of the
2826          # calls to MadLoop there.
2827          tir_cache_size = "parameter(TIR_CACHE_SIZE=1)"
2828          writer.writelines(tir_cache_size)
2829
2830 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2831 write_auxiliary_files=True,):
2832 """Create the loop_matrix.f file.""" 2833 2834 if not matrix_element.get('processes') or \ 2835 not matrix_element.get('diagrams'): 2836 return 0 2837 2838 # Set lowercase/uppercase Fortran code 2839 writers.FortranWriter.downcase = False 2840 2841 # Starting off with the treatment of the split_orders since some 2842 # of the information extracted there will come into the 2843 # general_replace_dict. Split orders are abbreviated SO in all the 2844 # keys of the replacement dictionaries. 2845 2846 # Take care of the split_orders 2847 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2848 # Creating here a temporary list containing only the information of 2849 # what are the different squared split orders contributing 2850 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2851 sqso_contribs = [sqso[0] for sqso in squared_orders] 2852 split_orders = matrix_element.get('processes')[0].get('split_orders') 2853 # The entries set in the function below are only for check_sa written 2854 # out in write_loop__matrix_element_v4 (it is however placed here because the 2855 # split order information is only available here). 2856 self.setup_check_sa_replacement_dictionary(matrix_element, 2857 split_orders,sqso_contribs,amps_orders) 2858 2859 # Now recast the split order basis for the loop, born and counterterm 2860 # amplitude into one single splitorderbasis. 2861 overall_so_basis = list(set( 2862 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2863 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2864 # We must re-sort it to make sure it follows an increasing WEIGHT order 2865 order_hierarchy = matrix_element.get('processes')[0]\ 2866 .get('model').get('order_hierarchy') 2867 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2868 set(order_hierarchy.keys()): 2869 overall_so_basis.sort(key= lambda so: 2870 sum([order_hierarchy[split_orders[i]]*order_power for \ 2871 i, order_power in enumerate(so)])) 2872 2873 # Those are additional entries used throughout the different files of 2874 # MadLoop5 2875 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2876 matrix_element.rep_dict['nSO'] = len(split_orders) 2877 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2878 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2879 2880 writers.FortranWriter('nsquaredSO.inc').writelines( 2881 """INTEGER NSQUAREDSO 2882 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2883 2884 replace_dict = copy.copy(matrix_element.rep_dict) 2885 # Build the general array mapping the split orders indices to their 2886 # definition 2887 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2888 overall_so_basis,'AMPSPLITORDERS')) 2889 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2890 sqso_contribs,'SQPLITORDERS')) 2891 2892 # Specify what are the squared split orders selected by the proc def. 2893 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2894 matrix_element.get('processes')[0],sqso_contribs) 2895 2896 # Now we build the different arrays storing the split_orders ID of each 2897 # amp. 
2898 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2899 for SO in amps_orders['loop_amp_orders']: 2900 for amp_number in SO[1]: 2901 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2902 2903 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2904 ampSO_list,'LOOPAMPORDERS')) 2905 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2906 for SO in amps_orders['born_amp_orders']: 2907 for amp_number in SO[1]: 2908 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2909 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2910 ampSO_list,'BORNAMPORDERS')) 2911 2912 # We then go to the TIR setup 2913 # The first entry is the CutTools, we make sure it is available 2914 looplibs_av=['.TRUE.'] 2915 # one should be careful about the order in the following as it must match 2916 # the ordering in MadLoopParamsCard. 2917 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']: 2918 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2919 self.tir_available_dict[tir_lib] else '.FALSE.') 2920 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2921 2922 # Helicity offset convention 2923 # For a given helicity, the attached integer 'i' means 2924 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2925 # to helicity number abs(i+HELOFFSET) 2926 # 'i' == -HELOFFSET -> Helicity is analytically zero 2927 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2928 # If it is zero, it is skipped. 2929 # Typically, the hel_offset is 10000 2930 replace_dict['hel_offset'] = 10000 2931 2932 # Extract overall denominator 2933 # Averaging initial state color, spin, and identical FS particles 2934 den_factor_line = self.get_den_factor_line(matrix_element) 2935 replace_dict['den_factor_line'] = den_factor_line 2936 2937 # When the user asks for the polarized matrix element we must 2938 # multiply back by the helicity averaging factor 2939 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2940 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2941 matrix_element.get_beams_hel_avg_factor() 2942 2943 if write_auxiliary_files: 2944 # Write out the color matrix 2945 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2946 CMWriter=open(pjoin('..','MadLoop5_resources', 2947 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2948 for ColorLine in CMNum: 2949 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2950 CMWriter.close() 2951 CMWriter=open(pjoin('..','MadLoop5_resources', 2952 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2953 for ColorLine in CMDenom: 2954 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2955 CMWriter.close() 2956 2957 # Write out the helicity configurations 2958 HelConfigs=matrix_element.get_helicity_matrix() 2959 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2960 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2961 for HelConfig in HelConfigs: 2962 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2963 HelConfigWriter.close() 2964 2965 # Extract helas calls 2966 born_ct_helas_calls, uvct_helas_calls = \ 2967 fortran_model.get_born_ct_helas_calls(matrix_element, 2968 squared_orders=squared_orders,split_orders=split_orders) 2969 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2970 matrix_element,group_loops=self.group_loops, 2971 squared_orders=squared_orders,split_orders=split_orders) 
2972 2973 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2974 group_loops=self.group_loops, 2975 squared_orders=squared_orders, split_orders=split_orders) 2976 # The proc_prefix must be replaced 2977 coef_construction = [c % matrix_element.rep_dict for c 2978 in coef_construction] 2979 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 2980 2981 file = open(os.path.join(self.template_dir,\ 2982 'loop_matrix_standalone.inc')).read() 2983 2984 # Setup the contextual environment which is used in the splitting 2985 # functions below 2986 context = self.get_context(matrix_element) 2987 file=self.split_HELASCALLS(writer,replace_dict,\ 2988 'helas_calls_split.inc',file,born_ct_helas_calls,\ 2989 'born_ct_helas_calls','helas_calls_ampb', 2990 required_so_broadcaster = 'CT_REQ_SO_DONE', 2991 continue_label = 2000, context = context) 2992 file=self.split_HELASCALLS(writer,replace_dict,\ 2993 'helas_calls_split.inc',file,uvct_helas_calls,\ 2994 'uvct_helas_calls','helas_calls_uvct', 2995 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 2996 continue_label = 3000, context=context) 2997 file=self.split_HELASCALLS(writer,replace_dict,\ 2998 'helas_calls_split.inc',file,coef_construction,\ 2999 'coef_construction','coef_construction', 3000 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 3001 continue_label = 4000, context=context) 3002 file=self.split_HELASCALLS(writer,replace_dict,\ 3003 'helas_calls_split.inc',file,loop_CT_calls,\ 3004 'loop_CT_calls','loop_CT_calls', 3005 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 3006 continue_label = 5000, context=context) 3007 3008 # Add the entries above to the general_replace_dict so that it can be 3009 # used by write_mp_compute_loop_coefs later 3010 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 3011 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 3012 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 3013 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 3014 3015 replace_dict['coef_merging']='\n'.join(coef_merging) 3016 3017 file = file % replace_dict 3018 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \ 3019 loop_CT_calls)) 3020 if writer: 3021 # Write the file 3022 writer.writelines(file,context=context) 3023 return number_of_calls 3024 else: 3025 # Return it to be written along with the others 3026 return number_of_calls, file
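The split-order bookkeeping above (building overall_so_basis, re-sorting it by the model's order_hierarchy, and filling the LOOPAMPORDERS/BORNAMPORDERS arrays) is easier to follow on plain Python data. The following is a minimal, self-contained sketch of those two steps with invented order names, powers and amplitude numbers; it does not use any MadGraph class.

# Minimal sketch of the split-order bookkeeping above (values are invented).
order_hierarchy = {'QCD': 1, 'QED': 2}     # model hierarchy: WEIGHT per order
split_orders = ['QCD', 'QED']

# Each entry pairs a tuple of (QCD, QED) powers with the amplitude numbers
# carrying exactly those powers, mimicking amps_orders above.
loop_amp_orders = [((4, 0), [1, 2, 5]), ((2, 2), [3, 4])]
born_amp_orders = [((2, 0), [1]), ((0, 2), [2])]

# Overall basis: union of all split-order tuples appearing in either list.
overall_so_basis = list(set(so for so, _ in loop_amp_orders + born_amp_orders))

# Re-sort by increasing WEIGHT, i.e. sum_i hierarchy(order_i)*power_i.
# (The real code only does this when all split_orders appear in the hierarchy.)
overall_so_basis.sort(key=lambda so: sum(
    order_hierarchy[split_orders[i]] * power for i, power in enumerate(so)))

def amp_so_indices(amp_orders, basis):
    """Map each amplitude number to the 1-based index of its split-order tuple."""
    indices = [-1] * sum(len(amps) for _, amps in amp_orders)
    for so, amps in amp_orders:
        for amp_number in amps:
            indices[amp_number - 1] = basis.index(so) + 1
    return indices

print(overall_so_basis)                                   # sorted split-order tuples
print(amp_so_indices(loop_amp_orders, overall_so_basis))  # content of LOOPAMPORDERS
print(amp_so_indices(born_amp_orders, overall_so_basis))  # content of BORNAMPORDERS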
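The helicity filter convention documented in the comment above ('i' interpreted relative to HELOFFSET) is compact enough to be misread, so here is the same convention restated in executable form. The decoder below is hypothetical, not part of MadLoop, and assumes the typical offset of 10000 quoted above.

# Hypothetical decoder for the helicity filter convention described above.
HEL_OFFSET = 10000  # typical value quoted in the comment

def decode_hel_filter(i, hel_offset=HEL_OFFSET):
    """Return a human-readable reading of one helicity filter entry."""
    if i < -hel_offset:
        # Equal, up to a sign, to another helicity configuration.
        return 'same as helicity #%d up to a sign' % abs(i + hel_offset)
    elif i == -hel_offset:
        return 'analytically zero'
    elif i == 0:
        return 'skipped (zero weight)'
    else:
        return 'contributes with weight %d' % i

for entry in (-10003, -10000, 0, 2):
    print('%7d -> %s' % (entry, decode_hel_filter(entry)))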
3027 
3028 #===============================================================================
3029 # LoopProcessExporterFortranMatchBox
3030 #===============================================================================
3031 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 3032 export_v4.ProcessExporterFortranMatchBox):
3033 """Class to take care of exporting a set of loop matrix elements in the 3034 Fortran format.""" 3035 3036 default_opt = {'clean': False, 'complex_mass':False, 3037 'export_format':'madloop_matchbox', 'mp':True, 3038 'loop_dir':'', 'cuttools_dir':'', 3039 'fortran_compiler':'gfortran', 3040 'output_dependencies':'external', 3041 'sa_symmetry':True} 3042 3043 3044
3045 - def get_color_string_lines(self, matrix_element):
3046 """Return the color matrix definition lines for this matrix element. Split 3047 rows in chunks of size n.""" 3048 3049 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
3050 3051
3052 - def get_JAMP_lines(self, *args, **opts):
3053 """Adding leading color part of the colorflow""" 3054 3055 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
3056
3057 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
3058 """ To not mix notations between borns and virtuals we call it here also MG5 """ 3059 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
3060 3061 3062 #=============================================================================== 3063 # LoopInducedExporter 3064 #===============================================================================
3065 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
3066 """ A class to specify all the functions common to LoopInducedExporterMEGroup 3067 and LoopInducedExporterMENoGroup (but not relevant for the original 3068 Madevent exporters)""" 3069 3070 madloop_makefile_name = 'makefile_MadLoop' 3071 3072
3073 - def __init__(self, *args, **opts):
3074 """ Initialize the process, setting the proc characteristics.""" 3075 super(LoopInducedExporterME, self).__init__(*args, **opts) 3076 self.proc_characteristic['loop_induced'] = True
3077
3078 - def get_context(self,*args,**opts):
3079 """ Make sure that the contextual variable MadEventOutput is set to 3080 True for this exporter""" 3081 3082 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 3083 context['MadEventOutput'] = True 3084 return context
3085 3086 #=========================================================================== 3087 # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat) 3088 #===========================================================================
3089 - def write_procdef_mg5(self, file_pos, modelname, process_str):
3090 """ write an equivalent of the MG4 proc_card in order that all the Madevent 3091 Perl script of MadEvent4 are still working properly for pure MG5 run. 3092 Not needed for StandAlone so we need to call the correct one 3093 """ 3094 3095 return export_v4.ProcessExporterFortran.write_procdef_mg5( 3096 self, file_pos, modelname, process_str)
3097
3098 - def get_source_libraries_list(self):
3099 """ Returns the list of libraries to be compiling when compiling the 3100 SOURCE directory. It is different for loop_induced processes and 3101 also depends on the value of the 'output_dependencies' option""" 3102 3103 libraries_list = super(LoopInducedExporterME,self).\ 3104 get_source_libraries_list() 3105 3106 if self.dependencies=='internal': 3107 libraries_list.append('$(LIBDIR)libcts.$(libext)') 3108 libraries_list.append('$(LIBDIR)libiregi.$(libext)') 3109 3110 return libraries_list
3111 3118
3119 - def copy_template(self, *args, **opts):
3120 """Pick the right mother functions 3121 """ 3122 # Call specifically the necessary building functions for the mixed 3123 # template setup for both MadEvent and MadLoop standalone 3124 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 3125 copy_Source_makefile=False) 3126 3127 LoopProcessOptimizedExporterFortranSA.\ 3128 loop_optimized_additional_template_setup(self)
3129 3130 3131 #=========================================================================== 3132 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 3133 #===========================================================================
3134 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3135 """Function to finalize v4 directory, for inheritance. 3136 """ 3137 3138 self.proc_characteristic['loop_induced'] = True 3139 3140 # This can be uncommented if one desires to have the MadLoop 3141 # initialization performed at the end of the output phase. 3142 # Alternatively, one can simply execute the command 'initMadLoop' in 3143 # the madevent interactive interface after the output. 3144 # from madgraph.interface.madevent_interface import MadLoopInitializer 3145 # MadLoopInitializer.init_MadLoop(self.dir_path, 3146 # subproc_prefix=self.SubProc_prefix, MG_options=None) 3147 3148 self.write_global_specs(matrix_elements)
3149
3150 - def write_tir_cache_size_include(self, writer):
3151 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 3152 cache the the user wishes to employ and the default value for it. 3153 This can have an impact on MadLoop speed when using stability checks 3154 but also impacts in a non-negligible way MadLoop's memory footprint. 3155 It is therefore important that the user can chose its size.""" 3156 3157 # In this case of MadLoop+MadEvent output, we set it to 2 because we 3158 # gain further speed increase with a TIR cache of size 2 due to the 3159 # the fact that we call MadLoop once per helicity configuration in this 3160 # case. 3161 tir_cach_size = "parameter(TIR_CACHE_SIZE=2)" 3162 writer.writelines(tir_cach_size)
3163
3164 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 3165 proc_id = None, config_map = [], subproc_number = None):
3166 """ Write it the wrapper to call the ML5 subroutine in the library.""" 3167 3168 # Generating the MadEvent wrapping ME's routines 3169 if not matrix_element.get('processes') or \ 3170 not matrix_element.get('diagrams'): 3171 return 0 3172 3173 if not isinstance(writer, writers.FortranWriter): 3174 raise writers.FortranWriter.FortranWriterError(\ 3175 "writer not FortranWriter") 3176 3177 replace_dict = copy.copy(matrix_element.rep_dict) 3178 3179 # Extract version number and date from VERSION file 3180 info_lines = self.get_mg5_info_lines() 3181 replace_dict['info_lines'] = info_lines 3182 3183 # Extract process info lines 3184 process_lines = self.get_process_info_lines(matrix_element) 3185 replace_dict['process_lines'] = process_lines 3186 3187 # Set proc_id 3188 # It can be set to None when write_matrix_element_v4 is called without 3189 # grouping. In this case the subroutine SMATRIX should take an empty 3190 # suffix. 3191 if proc_id is None: 3192 replace_dict['proc_id'] = '' 3193 else: 3194 replace_dict['proc_id'] = proc_id 3195 3196 #set the average over the number of initial helicities 3197 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 3198 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 3199 matrix_element.get_beams_hel_avg_factor() 3200 3201 # Extract helicity lines 3202 helicity_lines = self.get_helicity_lines(matrix_element) 3203 replace_dict['helicity_lines'] = helicity_lines 3204 3205 3206 # Extract ndiags 3207 ndiags = len(matrix_element.get('diagrams')) 3208 replace_dict['ndiags'] = ndiags 3209 3210 # Set define_iconfigs_lines 3211 replace_dict['define_iconfigs_lines'] = \ 3212 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 3213 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 3214 3215 if proc_id: 3216 # Set lines for subprocess group version 3217 # Set define_iconfigs_lines 3218 replace_dict['define_iconfigs_lines'] += \ 3219 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 3220 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 3221 # Set set_amp2_line 3222 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id 3223 else: 3224 # Standard running 3225 # Set set_amp2_line 3226 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)" 3227 3228 # If group_numer 3229 replace_dict['ml_prefix'] = \ 3230 self.get_ME_identifier(matrix_element, subproc_number, proc_id) 3231 3232 # Extract ncolor 3233 ncolor = max(1, len(matrix_element.get('color_basis'))) 3234 replace_dict['ncolor'] = ncolor 3235 3236 n_tot_diags = len(matrix_element.get_loop_diagrams()) 3237 replace_dict['n_tot_diags'] = n_tot_diags 3238 3239 file = open(pjoin(_file_path, \ 3240 'iolibs/template_files/%s' % self.matrix_file)).read() 3241 file = file % replace_dict 3242 3243 # Write the file 3244 writer.writelines(file) 3245 3246 return 0, ncolor
3247
3248 - def get_amp2_lines(self, *args, **opts):
3249 """Make sure the function is implemented in the daughters""" 3250 3251 raise NotImplemented, 'The function get_amp2_lines must be called in '+\ 3252 ' the daugthers of LoopInducedExporterME'
3253 3254 #=============================================================================== 3255 # LoopInducedExporterMEGroup 3256 #===============================================================================
3257 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3258 export_v4.ProcessExporterFortranMEGroup):
3259 """Class to take care of exporting a set of grouped loop induced matrix 3260 elements""" 3261 3262 matrix_file = "matrix_loop_induced_madevent_group.inc" 3263 3269
3270 - def write_source_makefile(self, *args, **opts):
3271 """Pick the correct write_source_makefile function from 3272 ProcessExporterFortranMEGroup""" 3273 3274 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 3275 *args, **opts)
3276
3277 - def copy_template(self, *args, **opts):
3278 """Pick the right mother functions 3279 """ 3280 # Call specifically the necessary building functions for the mixed 3281 # template setup for both MadEvent and MadLoop standalone 3282 3283 # Start witht the MadEvent one 3284 export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts) 3285 3286 # Then the MadLoop-standalone related one 3287 LoopInducedExporterME.copy_template(self, *args, **opts)
3288
3289 - def finalize(self, *args, **opts):
3290 """Pick the right mother functions 3291 """ 3292 # Call specifically what finalize_v4_directory must be used, so that the 3293 # MRO doesn't interfere. 3294 3295 self.proc_characteristic['loop_induced'] = True 3296 3297 export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts) 3298 3299 # And the finilize from LoopInducedExporterME which essentially takes 3300 # care of MadLoop virtuals initialization 3301 LoopInducedExporterME.finalize(self,*args,**opts)
3302
3303 - def generate_subprocess_directory(self, subproc_group, 3304 fortran_model,group_number):
3305 """Generate the Pn directory for a subprocess group in MadEvent, 3306 including the necessary matrix_N.f files, configs.inc and various 3307 other helper files""" 3308 3309 # Generate the MadLoop files 3310 calls = 0 3311 matrix_elements = subproc_group.get('matrix_elements') 3312 for ime, matrix_element in enumerate(matrix_elements): 3313 self.unique_id +=1 3314 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3315 group_number = group_number, proc_id = str(ime+1), 3316 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3317 config_map = subproc_group.get('diagram_maps')[ime], 3318 unique_id=self.unique_id) 3319 3320 # Then generate the MadEvent files 3321 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3322 self, subproc_group,fortran_model,group_number) 3323 3324 return calls
3325
3326 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3327 """Return the various replacement dictionary inputs necessary for the 3328 multichanneling amp2 definition for the loop-induced MadEvent output. 3329 """ 3330 3331 if not config_map: 3332 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 3333 ' necessary for the MadEvent Loop-induced output with grouping.' 3334 3335 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3336 3337 ret_lines = [] 3338 # In this case, we need to sum up all amplitudes that have 3339 # identical topologies, as given by the config_map (which 3340 # gives the topology/config for each of the diagrams 3341 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3342 diagrams = matrix_element.get_loop_diagrams() 3343 else: 3344 diagrams = matrix_element.get('diagrams') 3345 3346 # Note that we need to use AMP2 number corresponding to the first 3347 # diagram number used for that AMP2. 3348 # The dictionary below maps the config ID to this corresponding first 3349 # diagram number 3350 config_index_map = {} 3351 # For each diagram number, the dictionary below gives the config_id it 3352 # belongs to or 0 if it doesn't belong to any. 3353 loop_amp_ID_to_config = {} 3354 3355 # Combine the diagrams with identical topologies 3356 config_to_diag_dict = {} 3357 for idiag, diag in enumerate(diagrams): 3358 try: 3359 config_to_diag_dict[config_map[idiag]].append(idiag) 3360 except KeyError: 3361 config_to_diag_dict[config_map[idiag]] = [idiag] 3362 3363 for config in sorted(config_to_diag_dict.keys()): 3364 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3365 3366 # First add the UV and R2 counterterm amplitudes of each selected 3367 # diagram for the multichannel config 3368 CT_amp_numbers = [a.get('number') for a in \ 3369 sum([diagrams[idiag].get_ct_amplitudes() for \ 3370 idiag in config_to_diag_dict[config]], [])] 3371 3372 for CT_amp_number in CT_amp_numbers: 3373 loop_amp_ID_to_config[CT_amp_number] = config 3374 3375 # Now add here the loop amplitudes. 3376 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3377 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3378 idiag in config_to_diag_dict[config]], [])] 3379 3380 for loop_amp_number in loop_amp_numbers: 3381 loop_amp_ID_to_config[loop_amp_number] = config 3382 3383 # Notice that the config_id's are not necessarily sequential here, so 3384 # the size of the config_index_map array has to be the maximum over all 3385 # config_ids. 3386 # config_index_map should never be empty unless there was no diagram, 3387 # so the expression below is ok. 3388 n_configs = max(config_index_map.keys()) 3389 replace_dict['nmultichannel_configs'] = n_configs 3390 3391 # We must fill the empty entries of the map with the dummy amplitude 3392 # number 0. 3393 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3394 for i in range(1,n_configs+1)] 3395 # Now the placeholder 'nmultichannels' refers to the number of 3396 # multi-channels which are contributing, so we must filter out zeros. 
3397 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3398 3399 # Now write the amp2 related inputs in the replacement dictionary 3400 res_list = [] 3401 chunk_size = 6 3402 for k in xrange(0, len(conf_list), chunk_size): 3403 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3404 (k + 1, min(k + chunk_size, len(conf_list)), 3405 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3406 3407 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3408 3409 res_list = [] 3410 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3411 amp_list = [loop_amp_ID_to_config[i] for i in \ 3412 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3413 chunk_size = 6 3414 for k in xrange(0, len(amp_list), chunk_size): 3415 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3416 (k + 1, min(k + chunk_size, len(amp_list)), 3417 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3418 3419 replace_dict['config_map_definition'] = '\n'.join(res_list) 3420 3421 return
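The last block of this method formats Python lists as Fortran DATA statements, six entries per line. The helper below isolates that formatting step as a standalone sketch; the list values are invented and the array name simply mirrors the placeholder filled above.

# Standalone sketch of the DATA-statement chunking performed above.
def fortran_data_lines(array_name, values, chunk_size=6):
    """Format values as Fortran DATA statements, chunk_size entries per line."""
    lines = []
    for k in range(0, len(values), chunk_size):
        chunk = values[k:k + chunk_size]
        lines.append("DATA (%s(i),i=%6r,%6r) /%s/" % (
            array_name, k + 1, min(k + chunk_size, len(values)),
            ','.join(["%6r" % v for v in chunk])))
    return lines

conf_list = [1, 3, 0, 7, 9, 0, 12, 15]   # invented first-diagram numbers
print('\n'.join(fortran_data_lines('config_index_map', conf_list)))
# DATA (config_index_map(i),i=     1,     6) /     1,     3,     0,     7,     9,     0/
# DATA (config_index_map(i),i=     7,     8) /    12,    15/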
3422 3423 #=============================================================================== 3424 # LoopInducedExporterMENoGroup 3425 #===============================================================================
3426 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3427 export_v4.ProcessExporterFortranME):
3428 """Class to take care of exporting a set of individual loop induced matrix 3429 elements""" 3430 3431 matrix_file = "matrix_loop_induced_madevent.inc" 3432 3438
3439 - def write_source_makefile(self, *args, **opts):
3440 """Pick the correct write_source_makefile function from 3441 ProcessExporterFortran""" 3442 3443 super(export_v4.ProcessExporterFortranME,self).\ 3444 write_source_makefile(*args, **opts)
3445
3446 - def copy_template(self, *args, **opts):
3447 """Pick the right mother functions 3448 """ 3449 # Call specifically the necessary building functions for the mixed 3450 # template setup for both MadEvent and MadLoop standalone 3451 3452 # Start witht the MadEvent one 3453 export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts) 3454 3455 # Then the MadLoop-standalone related one 3456 LoopInducedExporterME.copy_template(self, *args, **opts)
3457
3458 - def finalize(self, *args, **opts):
3459 """Pick the right mother functions 3460 """ 3461 3462 self.proc_characteristic['loop_induced'] = True 3463 # Call specifically what finalize must be used, so that the 3464 # MRO doesn't interfere. 3465 export_v4.ProcessExporterFortranME.finalize(self, *args, **opts) 3466 3467 # And the finilize_v4 from LoopInducedExporterME which essentially takes 3468 # care of MadLoop virtuals initialization 3469 LoopInducedExporterME.finalize(self, *args, **opts)
3470
3471 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
3472 """Generate the Pn directory for a subprocess group in MadEvent, 3473 including the necessary matrix_N.f files, configs.inc and various 3474 other helper files""" 3475 3476 self.unique_id += 1 3477 # Then generate the MadLoop files 3478 calls = self.generate_loop_subprocess(matrix_element,fortran_model, 3479 group_number = me_number, 3480 unique_id=self.unique_id) 3481 3482 3483 # First generate the MadEvent files 3484 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory( 3485 self, matrix_element, fortran_model, me_number) 3486 return calls
3487
3488 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3489 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3490 3491 if config_map: 3492 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3493 ' for the Loop induced exporter without grouping.' 3494 3495 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3496 # Get minimum legs in a vertex 3497 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3498 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3499 minvert = min(vert_list) if vert_list!=[] else 0 3500 3501 # Note that we need to use AMP2 number corresponding to the first 3502 # diagram number used for that AMP2. 3503 # The dictionary below maps the config ID to this corresponding first 3504 # diagram number 3505 config_index_map = {} 3506 # For each diagram number, the dictionary below gives the config_id it 3507 # belongs to or 0 if it doesn't belong to any. 3508 loop_amp_ID_to_config = {} 3509 3510 n_configs = 0 3511 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3512 # Ignore any diagrams with 4-particle vertices. 3513 use_for_multichanneling = True 3514 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3515 use_for_multichanneling = False 3516 curr_config = 0 3517 else: 3518 n_configs += 1 3519 curr_config = n_configs 3520 3521 if not use_for_multichanneling: 3522 if 0 not in config_index_map: 3523 config_index_map[0] = idiag + 1 3524 else: 3525 config_index_map[curr_config] = idiag + 1 3526 3527 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3528 for CT_amp in CT_amps: 3529 loop_amp_ID_to_config[CT_amp] = curr_config 3530 3531 Loop_amps = [a.get('amplitudes')[0].get('number') 3532 for a in diag.get_loop_amplitudes()] 3533 for Loop_amp in Loop_amps: 3534 loop_amp_ID_to_config[Loop_amp] = curr_config 3535 3536 # Now write the amp2 related inputs in the replacement dictionary 3537 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3538 replace_dict['nmultichannel_configs'] = n_configs 3539 # Now the placeholder 'nmultichannels' refers to the number of 3540 # multi-channels which are contributing which, in the non-grouped case 3541 # is always equal to the total number of multi-channels. 3542 replace_dict['nmultichannels'] = n_configs 3543 3544 res_list = [] 3545 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3546 if i!=0] 3547 chunk_size = 6 3548 for k in xrange(0, len(conf_list), chunk_size): 3549 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3550 (k + 1, min(k + chunk_size, len(conf_list)), 3551 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3552 3553 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3554 3555 res_list = [] 3556 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3557 amp_list = [loop_amp_ID_to_config[i] for i in \ 3558 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3559 chunk_size = 6 3560 for k in xrange(0, len(amp_list), chunk_size): 3561 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3562 (k + 1, min(k + chunk_size, len(amp_list)), 3563 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3564 3565 replace_dict['config_map_definition'] = '\n'.join(res_list)
3566