Package madgraph :: Package loop :: Module loop_exporters
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  38  import madgraph.iolibs.drawing_eps as draw 
  39  import madgraph.iolibs.files as files 
  40  import madgraph.iolibs.group_subprocs as group_subprocs 
  41  import madgraph.various.banner as banner_mod 
  42  import madgraph.various.misc as misc 
  43  import madgraph.various.q_polynomial as q_polynomial 
  44  import madgraph.iolibs.file_writers as writers 
  45  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  46  import madgraph.iolibs.template_files as template_files 
  47  import madgraph.iolibs.ufo_expression_parsers as parsers 
  48  import madgraph.iolibs.export_v4 as export_v4 
  49  import madgraph.various.diagram_symmetry as diagram_symmetry 
  50  import madgraph.various.process_checks as process_checks 
  51  import madgraph.various.progressbar as pbar 
  52  import madgraph.various.q_polynomial as q_polynomial 
  53  import madgraph.core.color_amp as color_amp 
  54  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  55  import models.check_param_card as check_param_card 
  56  from madgraph.loop.loop_base_objects import LoopDiagram 
  57  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  58   
  59   
  60   
  61  pjoin = os.path.join 
  62   
  63  import aloha.create_aloha as create_aloha 
  64  import models.write_param_card as param_writer 
  65  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  66  from madgraph.iolibs.files import cp, ln, mv 
  67  pjoin = os.path.join 
  68  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  69  logger = logging.getLogger('madgraph.loop_exporter') 
  70   
  71  #=============================================================================== 
  72  # LoopExporterFortran 
  73  #=============================================================================== 
class LoopExporterFortran(object):
    """ Class to define general helper functions to the different
    loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both
    from this class AND from the corresponding ProcessExporterFortran(ME,SA,...).
    It plays the same role as ProcessExporterFortran and simply defines here
    loop-specific helpers functions necessary for all loop exporters.
    Notice that we do not have LoopExporterFortran inheriting from
    ProcessExporterFortran but give access to arguments like dir_path and
    clean using options. This avoids method resolution order ambiguity."""

    # Loop-specific defaults layered on top of the generic Fortran exporter
    # options; the parent's dict is copied so it is never mutated.
    default_opt = dict(export_v4.ProcessExporterFortran.default_opt)
    default_opt.update({'clean': False, 'complex_mass':False,
                        'export_format':'madloop', 'mp':True,
                        'loop_dir':'', 'cuttools_dir':'',
                        'fortran_compiler':'gfortran',
                        'SubProc_prefix': 'P',
                        'output_dependencies': 'external',
                        'compute_color_flows': False,
                        'mode':''})

    # Fortran module file whose presence proves a given loop-reduction
    # library has been built, keyed by the tool's name.
    include_names = {'ninja'  : 'mninja.mod',
                     'golem'  : 'generic_function_1p.mod',
                     'samurai': 'msamurai.mod',
                     'collier': 'collier.mod'}
99 - def __init__(self, dir_path = "", opt=None):
100 """Initiate the LoopExporterFortran with directory information on where 101 to find all the loop-related source files, like CutTools""" 102 103 104 self.opt = dict(self.default_opt) 105 if opt: 106 self.opt.update(opt) 107 108 self.SubProc_prefix = self.opt['SubProc_prefix'] 109 self.loop_dir = self.opt['loop_dir'] 110 self.cuttools_dir = self.opt['cuttools_dir'] 111 self.fortran_compiler = self.opt['fortran_compiler'] 112 self.dependencies = self.opt['output_dependencies'] 113 self.compute_color_flows = self.opt['compute_color_flows'] 114 115 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
116 117 191
192 - def get_aloha_model(self, model):
193 """ Caches the aloha model created here as an attribute of the loop 194 exporter so that it can later be used in the LoopHelasMatrixElement 195 in the function compute_all_analytic_information for recycling aloha 196 computations across different LoopHelasMatrixElements steered by the 197 same loop exporter. 198 """ 199 if not hasattr(self, 'aloha_model'): 200 self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 201 return self.aloha_model
202 203 #=========================================================================== 204 # write the multiple-precision header files 205 #===========================================================================
206 - def write_mp_files(self, writer_mprec, writer_mpc):
207 """Write the cts_mprec.h and cts_mpc.h""" 208 209 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 210 writer_mprec.writelines(file) 211 212 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 213 file = file.replace('&','') 214 writer_mpc.writelines(file) 215 216 return True
217 218 #=============================================================================== 219 # LoopProcessExporterFortranSA 220 #===============================================================================
class LoopProcessExporterFortranSA(LoopExporterFortran,
                                   export_v4.ProcessExporterFortranSA):

    """Class to take care of exporting a set of loop matrix elements in the
    Fortran format."""

    # Location of the loop-specific template files shipped with MG5_aMC.
    template_dir=os.path.join(_file_path,'iolibs/template_files/loop')
    # Name under which the MadLoop makefile is installed in SubProcesses.
    madloop_makefile_name = 'makefile'

    # Banner printed by MadLoop at runtime (style is fixed at export time).
    MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner(
                        style='classic2', color='green',
                        top_frame_char = '=', bottom_frame_char = '=',
                        left_frame_char = '{',right_frame_char = '}',
                        print_frame=True, side_margin = 7, up_margin = 1)
236 - def __init__(self, *args, **opts):
237 super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 238 self.unique_id=0 # to allow collier to distinguish the various loop subprocesses 239 self.has_loop_induced = False
240
241 - def copy_template(self, model):
242 """Additional actions needed to setup the Template. 243 """ 244 super(LoopProcessExporterFortranSA, self).copy_template(model) 245 246 self.loop_additional_template_setup()
247
    def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
        """Create the global information for loops: after the generic
        finalization, tune MadLoopParams.dat (and its _default copy) for the
        exported processes."""

        super(LoopProcessExporterFortranSA,self).finalize(matrix_element,
                                             cmdhistory, MG5options, outputflag)

        # Load the card written during template setup so the tuned values are
        # layered on top of it.
        MLCard = banner_mod.MadLoopParam(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
        # For loop-induced processes and *only* when summing over all helicity configurations
        # (which is the default for standalone usage), COLLIER is faster than Ninja.
        if self.has_loop_induced:
            MLCard['MLReductionLib'] = "7|6|1"
            # Computing the poles with COLLIER also unnecessarily slows down the code
            # It should only be set to True for checks and it's acceptable to remove them
            # here because for loop-induced processes they should be zero anyway.
            # We keep it active for non-loop induced processes because COLLIER is not the
            # main reduction tool in that case, and the poles wouldn't be zero then
            MLCard['COLLIERComputeUVpoles'] = False
            MLCard['COLLIERComputeIRpoles'] = False

        # Both the default and the active card receive the tuned settings.
        MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams_default.dat'))
        MLCard.write(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat'))
271 - def write_f2py_makefile(self):
272 return
273
274 - def write_f2py_check_sa(self, matrix_element, output_path):
275 """ Write the general check_sa.py in SubProcesses that calls all processes successively.""" 276 277 # No need to further edit this file for now. 278 file = open(os.path.join(self.template_dir,\ 279 'check_sa_all.py.inc')).read() 280 open(output_path,'w').writelines(file) 281 # Make it executable 282 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
283 284
285 - def write_f2py_splitter(self):
286 """write a function to call the correct matrix element""" 287 288 template = """ 289 %(python_information)s 290 291 SUBROUTINE INITIALISE(PATH) 292 C ROUTINE FOR F2PY to read the benchmark point. 293 IMPLICIT NONE 294 CHARACTER*512 PATH 295 CF2PY INTENT(IN) :: PATH 296 CALL SETPARA(PATH) !first call to setup the paramaters 297 RETURN 298 END 299 300 SUBROUTINE SET_MADLOOP_PATH(PATH) 301 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop 302 CHARACTER(512) PATH 303 CF2PY intent(in)::path 304 CALL SETMADLOOPPATH(PATH) 305 END 306 307 subroutine smatrixhel(pdgs, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE) 308 IMPLICIT NONE 309 310 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p 311 CF2PY integer, intent(in), dimension(npdg) :: pdgs 312 CF2PY integer, intent(in) :: npdg 313 CF2PY double precision, intent(out) :: ANS 314 CF2PY integer, intent(out) :: RETURNCODE 315 CF2PY double precision, intent(in) :: ALPHAS 316 CF2PY double precision, intent(in) :: SCALES2 317 318 integer pdgs(*) 319 integer npdg, nhel, RETURNCODE 320 double precision p(*) 321 double precision ANS, ALPHAS, PI,SCALES2 322 323 %(smatrixhel)s 324 325 return 326 end 327 328 subroutine get_pdg_order(OUT) 329 IMPLICIT NONE 330 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i) 331 332 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i) 333 DATA PDGS/ %(pdgs)s / 334 OUT=PDGS 335 RETURN 336 END 337 338 subroutine get_prefix(PREFIX) 339 IMPLICIT NONE 340 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i) 341 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i) 342 DATA PREF / '%(prefix)s'/ 343 PREFIX = PREF 344 RETURN 345 END 346 347 """ 348 349 allids = self.prefix_info.keys() 350 allprefix = [self.prefix_info[key][0] for key in allids] 351 min_nexternal = min([len(ids) for ids in allids]) 352 max_nexternal = max([len(ids) for ids in allids]) 353 354 info = [] 355 for key, (prefix, tag) in self.prefix_info.items(): 356 info.append('#PY %s : %s # %s' % 
(tag, key, prefix)) 357 358 359 text = [] 360 for n_ext in range(min_nexternal, max_nexternal+1): 361 current = [ids for ids in allids if len(ids)==n_ext] 362 if not current: 363 continue 364 if min_nexternal != max_nexternal: 365 if n_ext == min_nexternal: 366 text.append(' if (npdg.eq.%i)then' % n_ext) 367 else: 368 text.append(' else if (npdg.eq.%i)then' % n_ext) 369 for ii,pdgs in enumerate(current): 370 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)]) 371 if ii==0: 372 text.append( ' if(%s) then ! %i' % (condition, i)) 373 else: 374 text.append( ' else if(%s) then ! %i' % (condition,i)) 375 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[pdgs][0]) 376 text.append(' endif') 377 #close the function 378 if min_nexternal != max_nexternal: 379 text.append('endif') 380 381 formatting = {'python_information':'\n'.join(info), 382 'smatrixhel': '\n'.join(text), 383 'maxpart': max_nexternal, 384 'nb_me': len(allids), 385 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0' 386 for i in range(max_nexternal) \ 387 for pdg in allids]), 388 'prefix':'\',\''.join(allprefix) 389 } 390 391 392 text = template % formatting 393 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w') 394 fsock.writelines(text) 395 fsock.close()
396 397 398
    def loop_additional_template_setup(self, copy_Source_makefile = True):
        """ Perform additional actions specific for this class when setting
        up the template with the copy_template function.

        Copies/links the MadLoop-specific cards, makefiles and resources into
        the output directory and writes the CutTools multiple-precision
        headers. Returns 0 on failure to enter the SubProcesses directory.
        """

        # We must change some files to their version for NLO computations
        cpfiles= ["Cards/MadLoopParams.dat",
                  "SubProcesses/MadLoopParamReader.f",
                  "SubProcesses/MadLoopParams.inc"]
        if copy_Source_makefile:
            cpfiles.append("Source/makefile")

        for file in cpfiles:
            shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
                        os.path.join(self.dir_path, file))

        ln(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), pjoin(self.dir_path,'SubProcesses'))

        # We might need to give a different name to the MadLoop makefile
        shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'),
                    pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name))

        # Write SubProcesses/MadLoop_makefile_definitions with dummy variables
        # for the non-optimized output
        link_tir_libs=[]
        tir_libs=[]

        filePath = pjoin(self.dir_path, 'SubProcesses',
                         'MadLoop_makefile_definitions')
        calls = self.write_loop_makefile_definitions(
            writers.MakefileWriter(filePath),link_tir_libs,tir_libs)

        # We need minimal editing of MadLoopCommons.f
        # For the optimized output, this file will be overwritten once the
        # availability of COLLIER has been determined.
        MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
                             "SubProcesses","MadLoopCommons.inc")).read()
        writer = writers.FortranWriter(os.path.join(self.dir_path,
                                       "SubProcesses","MadLoopCommons.f"))
        writer.writelines(MadLoopCommon%{
            'print_banner_commands':self.MadLoop_banner}, context={
                'collier_available':False})
        writer.close()

        # Copy the whole MadLoop5_resources directory (empty at this stage)
        if not os.path.exists(pjoin(self.dir_path,'SubProcesses',
                                    'MadLoop5_resources')):
            cp(pjoin(self.loop_dir,'StandAlone','SubProcesses',
                     'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses'))

        # Link relevant cards from Cards inside the MadLoop5_resources
        ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'),
           pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
        ln(pjoin(self.dir_path,'Cards','param_card.dat'),
           pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
        ln(pjoin(self.dir_path,'Cards','ident_card.dat'),
           pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))

        # And remove check_sa in the SubProcess folder since now there is a
        # check_sa tailored to each subprocess.
        if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')):
            os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f'))

        cwd = os.getcwd()
        dirpath = os.path.join(self.dir_path, 'SubProcesses')
        try:
            os.chdir(dirpath)
        except os.error:
            logger.error('Could not cd to directory %s' % dirpath)
            return 0

        # Write the cts_mpc.h and cts_mprec.h files imported from CutTools
        self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
                            writers.FortranWriter('cts_mpc.h'))

        # Return to original PWD
        os.chdir(cwd)

        # We must link the CutTools to the Library folder of the active Template
        super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
478 479 # This function is placed here and not in optimized exporterd, 480 # because the same makefile.inc should be used in all cases.
481 - def write_loop_makefile_definitions(self, writer, link_tir_libs, 482 tir_libs,tir_include=[]):
483 """ Create the file makefile which links to the TIR libraries.""" 484 485 file = open(os.path.join(self.loop_dir,'StandAlone', 486 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 487 replace_dict={} 488 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 489 replace_dict['tir_libs']=' '.join(tir_libs) 490 replace_dict['dotf']='%.f' 491 replace_dict['prefix']= self.SubProc_prefix 492 replace_dict['doto']='%.o' 493 replace_dict['tir_include']=' '.join(tir_include) 494 file=file%replace_dict 495 if writer: 496 writer.writelines(file) 497 else: 498 return file
499
500 - def convert_model(self, model, wanted_lorentz = [], 501 wanted_couplings = []):
502 """ Caches the aloha model created here when writing out the aloha 503 fortran subroutine. 504 """ 505 self.get_aloha_model(model) 506 super(LoopProcessExporterFortranSA, self).convert_model(model, 507 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
508
509 - def get_ME_identifier(self, matrix_element, 510 group_number = None, group_elem_number = None):
511 """ A function returning a string uniquely identifying the matrix 512 element given in argument so that it can be used as a prefix to all 513 MadLoop5 subroutines and common blocks related to it. This allows 514 to compile several processes into one library as requested by the 515 BLHA (Binoth LesHouches Accord) guidelines. 516 The arguments group_number and proc_id are just for the LoopInduced 517 output with MadEvent.""" 518 519 # When disabling the loop grouping in the LoopInduced MadEvent output, 520 # we have only the group_number set and the proc_id set to None. In this 521 # case we don't print the proc_id. 522 if (not group_number is None) and group_elem_number is None: 523 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 524 group_number) 525 elif group_number is None or group_elem_number is None: 526 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 527 else: 528 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 529 group_number, group_elem_number)
530
531 - def get_SubProc_folder_name(self, process, 532 group_number = None, group_elem_number = None):
533 """Returns the name of the SubProcess directory, which can contain 534 the process goup and group element number for the case of loop-induced 535 integration with MadEvent.""" 536 537 # When disabling the loop grouping in the LoopInduced MadEvent output, 538 # we have only the group_number set and the proc_id set to None. In this 539 # case we don't print the proc_id. 540 if not group_number is None and group_elem_number is None: 541 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 542 group_number,process.shell_string(print_id=False)) 543 elif group_number is None or group_elem_number is None: 544 return "%s%s" %(self.SubProc_prefix,process.shell_string()) 545 else: 546 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 547 group_number, group_elem_number,process.shell_string(print_id=False))
548 549 #=========================================================================== 550 # Set the compiler to be gfortran for the loop processes. 551 #===========================================================================
552 - def compiler_choice(self, compiler=export_v4.default_compiler):
553 """ Different daughter classes might want different compilers. 554 Here, the gfortran compiler is used throughout the compilation 555 (mandatory for CutTools written in f90) """ 556 if isinstance(compiler, str): 557 fortran_compiler = compiler 558 compiler = export_v4.default_compiler 559 compiler['fortran'] = fortran_compiler 560 561 if not compiler['fortran'] is None and not \ 562 any([name in compiler['fortran'] for name in \ 563 ['gfortran','ifort']]): 564 logger.info('For loop processes, the compiler must be fortran90'+\ 565 'compatible, like gfortran.') 566 compiler['fortran'] = 'gfortran' 567 self.set_compiler(compiler,True) 568 else: 569 self.set_compiler(compiler) 570 571 self.set_cpp_compiler(compiler['cpp'])
572
573 - def turn_to_mp_calls(self, helas_calls_list):
574 # Prepend 'MP_' to all the helas calls in helas_calls_list. 575 # Might look like a brutal unsafe implementation, but it is not as 576 # these calls are built from the properties of the HELAS objects and 577 # whether they are evaluated in double or quad precision is none of 578 # their business but only relevant to the output algorithm. 579 # Also the cast to complex masses DCMPLX(*) must be replaced by 580 # CMPLX(*,KIND=16) 581 MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 582 583 def replaceWith(match_obj): 584 return match_obj.group('toSub')+'MP_'
585 586 DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 587 re.IGNORECASE | re.MULTILINE) 588 589 for i, helas_call in enumerate(helas_calls_list): 590 new_helas_call=MP.sub(replaceWith,helas_call) 591 helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 592 new_helas_call)
593 597 605
    def make(self):
        """ Compiles the additional dependences for loop (such as CutTools).

        With output_dependencies='internal' CutTools is compiled inside the
        output's Source folder; the compiler recorded in compiler_version.log
        is then checked against the currently configured one and CutTools is
        rebuilt on a mismatch.
        """
        super(LoopProcessExporterFortranSA, self).make()

        # make CutTools (only necessary with MG option output_dependencies='internal')
        libdir = os.path.join(self.dir_path,'lib')
        sourcedir = os.path.join(self.dir_path,'Source')
        if self.dependencies=='internal':
            if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
               not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
                if os.path.exists(pjoin(sourcedir,'CutTools')):
                    logger.info('Compiling CutTools (can take a couple of minutes) ...')
                    misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
                    logger.info(' ...done.')
                else:
                    raise MadGraph5Error('Could not compile CutTools because its'+\
                       ' source directory could not be found in the SOURCE folder.')
            # Re-check after the compilation attempt: both artefacts must exist.
            if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
               not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
                raise MadGraph5Error('CutTools compilation failed.')

            # Verify compatibility between current compiler and the one which was
            # used when last compiling CutTools (if specified).
            # NOTE(review): reconstructed as belonging to the 'internal'
            # dependency branch — confirm against upstream sources.
            compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
                                  libdir, 'libcts.a')))),'compiler_version.log')
            if os.path.exists(compiler_log_path):
                compiler_version_used = open(compiler_log_path,'r').read()
                if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
                           pjoin(sourcedir,'make_opts')))) in compiler_version_used:
                    if os.path.exists(pjoin(sourcedir,'CutTools')):
                        logger.info('CutTools was compiled with a different fortran'+\
                                    ' compiler. Re-compiling it now...')
                        misc.compile(['cleanCT'], cwd = sourcedir)
                        misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
                        logger.info(' ...done.')
                    else:
                        raise MadGraph5Error("CutTools installation in %s"\
                                %os.path.realpath(pjoin(libdir, 'libcts.a'))+\
                                " seems to have been compiled with a different compiler than"+\
                                " the one specified in MG5_aMC. Please recompile CutTools.")
646
647 - def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
648 """Concatenate the coefficient information to reduce it to 649 (fraction, is_imaginary) """ 650 651 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 652 653 return (total_coeff, is_imaginary)
654
655 - def get_amp_to_jamp_map(self, col_amps, n_amps):
656 """ Returns a list with element 'i' being a list of tuples corresponding 657 to all apparition of amplitude number 'i' in the jamp number 'j' 658 with coeff 'coeff_j'. The format of each tuple describing an apparition 659 is (j, coeff_j). where coeff_j is of the form (Fraction, is_imag).""" 660 661 if(isinstance(col_amps,list)): 662 if(col_amps and isinstance(col_amps[0],list)): 663 color_amplitudes=col_amps 664 else: 665 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 666 else: 667 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 668 669 # To store the result 670 res_list = [[] for i in range(n_amps)] 671 for i, coeff_list in enumerate(color_amplitudes): 672 for (coefficient, amp_number) in coeff_list: 673 res_list[amp_number-1].append((i,self.cat_coeff(\ 674 coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 675 676 return res_list
677
    def get_color_matrix(self, matrix_element):
        """Return the color matrix definition lines. This color matrix is of size
        NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born
        amplitude.

        Returns the pair (numerators, denominators), both lists of lists; a
        negative denominator encodes an imaginary color coefficient.
        """

        logger.info('Computing diagram color coefficients')

        # The two lists have a list of tuples at element 'i' which correspond
        # to all apparitions of loop amplitude number 'i' in the jampl number 'j'
        # with coeff 'coeffj'. The format of each tuple describing an apparition
        # is (j, coeffj).
        ampl_to_jampl=self.get_amp_to_jamp_map(\
                    matrix_element.get_loop_color_amplitudes(),
                    matrix_element.get_number_of_loop_amplitudes())
        if matrix_element.get('processes')[0].get('has_born'):
            ampb_to_jampb=self.get_amp_to_jamp_map(\
                    matrix_element.get_born_color_amplitudes(),
                    matrix_element.get_number_of_born_amplitudes())
        else:
            # Loop-induced: the loops are squared against themselves.
            ampb_to_jampb=ampl_to_jampl
        # Below is the original color matrix multiplying the JAMPS
        if matrix_element.get('color_matrix'):
            ColorMatrixDenom = \
                      matrix_element.get('color_matrix').get_line_denominators()
            ColorMatrixNum = [ matrix_element.get('color_matrix').\
                               get_line_numerators(index, denominator) for
                               (index, denominator) in enumerate(ColorMatrixDenom) ]
        else:
            # Trivial color structure.
            ColorMatrixDenom= [1]
            ColorMatrixNum = [[1]]

        # Below is the final color matrix output
        ColorMatrixNumOutput=[]
        ColorMatrixDenomOutput=[]

        # Now we construct the color factors between each born and loop amplitude
        # by scanning their contributions to the different jamps.
        start = time.time()
        progress_bar = None
        time_info = False
        for i, jampl_list in enumerate(ampl_to_jampl):
            # This can be pretty long for processes with many color flows.
            # So, if necessary, we tell the user the estimated time for the
            # processing (extrapolated from the first five iterations).
            if i==5:
                elapsed_time = time.time()-start
                t = len(ampl_to_jampl)*(elapsed_time/5.0)
                if t > 10.0:
                    time_info = True
                    logger.info('The color factors computation will take '+\
                      ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\
                      'Started on %s.'%datetime.datetime.now().strftime(\
                                                             "%d-%m-%Y %H:%M"))
                    if logger.getEffectiveLevel()<logging.WARNING:
                        widgets = ['Color computation:', pbar.Percentage(), ' ',
                                    pbar.Bar(),' ', pbar.ETA(), ' ']
                        progress_bar = pbar.ProgressBar(widgets=widgets,
                                         maxval=len(ampl_to_jampl), fd=sys.stdout)

            if not progress_bar is None:
                progress_bar.update(i+1)
                # Flush to force the printout of the progress_bar to be updated
                sys.stdout.flush()

            line_num=[]
            line_denom=[]

            # Treat the special case where this specific amplitude contributes to no
            # color flow at all. So it is zero because of color but not even due to
            # an accidental cancellation among color flows, but simply because of its
            # projection to each individual color flow is zero. In such case, the
            # corresponding jampl_list is empty and all color coefficients must then
            # be zero. This happens for example in the Higgs Effective Theory model
            # for the bubble made of a 4-gluon vertex and the effective ggH vertex.
            if len(jampl_list)==0:
                line_num=[0]*len(ampb_to_jampb)
                line_denom=[1]*len(ampb_to_jampb)
                ColorMatrixNumOutput.append(line_num)
                ColorMatrixDenomOutput.append(line_denom)
                continue

            for jampb_list in ampb_to_jampb:
                real_num=0
                imag_num=0
                # Common denominator so all contributions can be summed as
                # integers before reducing the final Fraction.
                common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]*
                    ampl_coeff[0].denominator*ampb_coeff[0].denominator) for
                    ((jampl, ampl_coeff),(jampb,ampb_coeff)) in
                    itertools.product(jampl_list,jampb_list)])
                for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \
                                       itertools.product(jampl_list,jampb_list):
                    # take the numerator and multiply by lcm/denominator
                    # as we will later divide by the lcm.
                    # NOTE(review): '/' here is python2 integer division on
                    # integers; under python3 it would yield a float and break
                    # the Fraction constructed below — confirm before porting.
                    buff_num=ampl_coeff[0].numerator*\
                        ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\
                        abs(common_denom)/(ampl_coeff[0].denominator*\
                        ampb_coeff[0].denominator*ColorMatrixDenom[jampl])
                    # Remember that we must take the complex conjugate of
                    # the born jamp color coefficient because we will compute
                    # the square with 2 Re(LoopAmp x BornAmp*)
                    if ampl_coeff[1] and ampb_coeff[1]:
                        real_num=real_num+buff_num
                    elif not ampl_coeff[1] and not ampb_coeff[1]:
                        real_num=real_num+buff_num
                    elif not ampl_coeff[1] and ampb_coeff[1]:
                        imag_num=imag_num-buff_num
                    else:
                        imag_num=imag_num+buff_num
                assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\
                  "color matrix element which has both a real and imaginary part."
                if imag_num!=0:
                    res=fractions.Fraction(imag_num,common_denom)
                    line_num.append(res.numerator)
                    # Negative denominator means imaginary color coef of the
                    # final color matrix
                    line_denom.append(res.denominator*-1)
                else:
                    res=fractions.Fraction(real_num,common_denom)
                    line_num.append(res.numerator)
                    # Positive denominator means real color coef of the final color matrix
                    line_denom.append(res.denominator)

            ColorMatrixNumOutput.append(line_num)
            ColorMatrixDenomOutput.append(line_denom)

        if time_info:
            logger.info('Finished on %s.'%datetime.datetime.now().strftime(\
                                                             "%d-%m-%Y %H:%M"))
        if progress_bar!=None:
            progress_bar.finish()

        return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
809
810 - def get_context(self,matrix_element):
811 """ Returns the contextual variables which need to be set when 812 pre-processing the template files.""" 813 814 # The nSquaredSO entry of the general replace dictionary should have 815 # been set in write_loopmatrix prior to the first call to this function 816 # However, for cases where the TIRCaching contextual variable is 817 # irrelevant (like in the default output), this might not be the case 818 # so we set it to 1. 819 try: 820 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 821 except (KeyError, AttributeError): 822 n_squared_split_orders = 1 823 824 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 825 self.has_loop_induced = max(LoopInduced, self.has_loop_induced) 826 # Force the computation of loop color flows for loop_induced processes 827 ComputeColorFlows = self.compute_color_flows or LoopInduced 828 # The variable AmplitudeReduction is just to make the contextual 829 # conditions more readable in the include files. 830 AmplitudeReduction = LoopInduced or ComputeColorFlows 831 # Even when not reducing at the amplitude level, the TIR caching 832 # is useful when there is more than one squared split order config. 833 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 834 MadEventOutput = False 835 return {'LoopInduced': LoopInduced, 836 'ComputeColorFlows': ComputeColorFlows, 837 'AmplitudeReduction': AmplitudeReduction, 838 'TIRCaching': TIRCaching, 839 'MadEventOutput': MadEventOutput}
840 841 842 #=========================================================================== 843 # generate_subprocess_directory 844 #===========================================================================
845 - def generate_loop_subprocess(self, matrix_element, fortran_model, 846 group_number = None, proc_id = None, config_map=None, unique_id=None):
847 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 848 including the necessary loop_matrix.f, born_matrix.f and include files. 849 Notice that this is too different from generate_subprocess_directory 850 so that there is no point reusing this mother function. 851 The 'group_number' and 'proc_id' options are only used for the LoopInduced 852 MadEvent output and only to specify the ME_identifier and the P* 853 SubProcess directory name.""" 854 855 cwd = os.getcwd() 856 proc_dir_name = self.get_SubProc_folder_name( 857 matrix_element.get('processes')[0],group_number,proc_id) 858 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 859 860 try: 861 os.mkdir(dirpath) 862 except os.error as error: 863 logger.warning(error.strerror + " " + dirpath) 864 865 try: 866 os.chdir(dirpath) 867 except os.error: 868 logger.error('Could not cd to directory %s' % dirpath) 869 return 0 870 871 logger.info('Creating files in directory %s' % dirpath) 872 873 if unique_id is None: 874 raise MadGraph5Error, 'A unique id must be provided to the function'+\ 875 'generate_loop_subprocess of LoopProcessExporterFortranSA.' 876 # Create an include with the unique consecutive ID assigned 877 open('unique_id.inc','w').write( 878 """ integer UNIQUE_ID 879 parameter(UNIQUE_ID=%d)"""%unique_id) 880 881 # Extract number of external particles 882 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 883 884 calls=self.write_loop_matrix_element_v4(None,matrix_element, 885 fortran_model, group_number = group_number, 886 proc_id = proc_id, config_map = config_map) 887 888 # We assume here that all processes must share the same property of 889 # having a born or not, which must be true anyway since these are two 890 # definite different classes of processes which can never be treated on 891 # the same footing. 
892 if matrix_element.get('processes')[0].get('has_born'): 893 filename = 'born_matrix.f' 894 calls = self.write_bornmatrix( 895 writers.FortranWriter(filename), 896 matrix_element, 897 fortran_model) 898 899 filename = 'pmass.inc' 900 self.write_pmass_file(writers.FortranWriter(filename), 901 matrix_element) 902 903 filename = 'ngraphs.inc' 904 self.write_ngraphs_file(writers.FortranWriter(filename), 905 len(matrix_element.get_all_amplitudes())) 906 907 # Do not draw the loop diagrams if they are too many. 908 # The user can always decide to do it manually, if really needed 909 loop_diags = [loop_diag for loop_diag in\ 910 matrix_element.get('base_amplitude').get('loop_diagrams')\ 911 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 912 if len(loop_diags)>5000: 913 logger.info("There are more than 5000 loop diagrams."+\ 914 "Only the first 5000 are drawn.") 915 filename = "loop_matrix.ps" 916 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 917 loop_diags[:5000]),filename, 918 model=matrix_element.get('processes')[0].get('model'),amplitude='') 919 logger.info("Drawing loop Feynman diagrams for " + \ 920 matrix_element.get('processes')[0].nice_string()) 921 plot.draw() 922 923 if matrix_element.get('processes')[0].get('has_born'): 924 filename = "born_matrix.ps" 925 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 926 get('born_diagrams'), 927 filename, 928 model=matrix_element.get('processes')[0].\ 929 get('model'), 930 amplitude='') 931 logger.info("Generating born Feynman diagrams for " + \ 932 matrix_element.get('processes')[0].nice_string(\ 933 print_weighted=False)) 934 plot.draw() 935 936 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 937 matrix_element.get('processes')[0],group_number,proc_id)) 938 939 # Return to original PWD 940 os.chdir(cwd) 941 942 if not calls: 943 calls = 0 944 return calls
945 966
967 - def generate_general_replace_dict(self,matrix_element, 968 group_number = None, proc_id = None):
969 """Generates the entries for the general replacement dictionary used 970 for the different output codes for this exporter.The arguments 971 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 972 973 dict={} 974 # A general process prefix which appears in front of all MadLooop 975 # subroutines and common block so that several processes can be compiled 976 # together into one library, as necessary to follow BLHA guidelines. 977 978 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 979 group_number = group_number, group_elem_number = proc_id) 980 981 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']: 982 for proc in matrix_element.get('processes'): 983 ids = [l.get('id') for l in proc.get('legs_with_decays')] 984 self.prefix_info[tuple(ids)] = [dict['proc_prefix'], proc.get_tag()] 985 986 # The proc_id is used for MadEvent grouping, so none of our concern here 987 # and it is simply set to an empty string. 988 dict['proc_id'] = '' 989 # Extract version number and date from VERSION file 990 info_lines = self.get_mg5_info_lines() 991 dict['info_lines'] = info_lines 992 # Extract process info lines 993 process_lines = self.get_process_info_lines(matrix_element) 994 dict['process_lines'] = process_lines 995 # Extract number of external particles 996 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 997 dict['nexternal'] = nexternal 998 dict['nincoming'] = ninitial 999 # Extract ncomb 1000 ncomb = matrix_element.get_helicity_combinations() 1001 dict['ncomb'] = ncomb 1002 # Extract nloopamps 1003 nloopamps = matrix_element.get_number_of_loop_amplitudes() 1004 dict['nloopamps'] = nloopamps 1005 # Extract nloopdiags 1006 nloopdiags = len(matrix_element.get('diagrams')) 1007 dict['nloopdiags'] = nloopdiags 1008 # Extract nctamps 1009 nctamps = matrix_element.get_number_of_CT_amplitudes() 1010 dict['nctamps'] = nctamps 1011 # Extract nwavefuncs 1012 nwavefuncs = 
matrix_element.get_number_of_external_wavefunctions() 1013 dict['nwavefuncs'] = nwavefuncs 1014 # Set format of the double precision 1015 dict['real_dp_format']='real*8' 1016 dict['real_mp_format']='real*16' 1017 # Set format of the complex 1018 dict['complex_dp_format']='complex*16' 1019 dict['complex_mp_format']='complex*32' 1020 # Set format of the masses 1021 dict['mass_dp_format'] = dict['complex_dp_format'] 1022 dict['mass_mp_format'] = dict['complex_mp_format'] 1023 # Fill in default values for the placeholders for the madevent 1024 # loop-induced output 1025 dict['nmultichannels'] = 0 1026 dict['nmultichannel_configs'] = 0 1027 dict['config_map_definition'] = '' 1028 dict['config_index_map_definition'] = '' 1029 # Color matrix size 1030 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 1031 # it is NLOOPAMPSxNBORNAMPS 1032 # Also, how to access the number of Born squared order contributions 1033 1034 if matrix_element.get('processes')[0].get('has_born'): 1035 dict['color_matrix_size'] = 'nbornamps' 1036 dict['get_nsqso_born']=\ 1037 "include 'nsqso_born.inc'" 1038 else: 1039 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 1040 PARAMETER (NSQSO_BORN=0) 1041 """ 1042 dict['color_matrix_size'] = 'nloopamps' 1043 1044 # These placeholders help to have as many common templates for the 1045 # output of the loop induced processes and those with a born 1046 # contribution. 
1047 if matrix_element.get('processes')[0].get('has_born'): 1048 # Extract nbornamps 1049 nbornamps = matrix_element.get_number_of_born_amplitudes() 1050 dict['nbornamps'] = nbornamps 1051 dict['ncomb_helas_objs'] = ',ncomb' 1052 dict['nbornamps_decl'] = \ 1053 """INTEGER NBORNAMPS 1054 PARAMETER (NBORNAMPS=%d)"""%nbornamps 1055 dict['nBornAmps'] = nbornamps 1056 1057 else: 1058 dict['ncomb_helas_objs'] = '' 1059 dict['dp_born_amps_decl'] = '' 1060 dict['dp_born_amps_decl_in_mp'] = '' 1061 dict['copy_mp_to_dp_born_amps'] = '' 1062 dict['mp_born_amps_decl'] = '' 1063 dict['nbornamps_decl'] = '' 1064 dict['nbornamps'] = 0 1065 dict['nBornAmps'] = 0 1066 1067 return dict
1068
1069 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 1070 group_number = None, proc_id = None, config_map = None):
1071 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 1072 mp_born_amps_and_wfs. 1073 The arguments group_number and proc_id are just for the LoopInduced 1074 output with MadEvent and only used in get_ME_identifier. 1075 """ 1076 1077 # Create the necessary files for the loop matrix element subroutine 1078 1079 if config_map: 1080 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 1081 'MadEvent and cannot compute the AMP2 for multi-channeling.' 1082 1083 if not isinstance(fortran_model,\ 1084 helas_call_writers.FortranUFOHelasCallWriter): 1085 raise MadGraph5Error, 'The loop fortran output can only'+\ 1086 ' work with a UFO Fortran model' 1087 1088 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 1089 argument=fortran_model.get('model'), 1090 hel_sum=matrix_element.get('processes')[0].get('has_born')) 1091 1092 # Compute the analytical information of the loop wavefunctions in the 1093 # loop helas matrix elements using the cached aloha model to reuse 1094 # as much as possible the aloha computations already performed for 1095 # writing out the aloha fortran subroutines. 1096 matrix_element.compute_all_analytic_information( 1097 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1098 1099 # Initialize a general replacement dictionary with entries common to 1100 # many files generated here. 1101 matrix_element.rep_dict = self.generate_general_replace_dict( 1102 matrix_element, group_number = group_number, proc_id = proc_id) 1103 1104 # Extract max number of loop couplings (specific to this output type) 1105 matrix_element.rep_dict['maxlcouplings']= \ 1106 matrix_element.find_max_loop_coupling() 1107 # The born amp declaration suited for also outputing the loop-induced 1108 # processes as well. 
1109 if matrix_element.get('processes')[0].get('has_born'): 1110 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 1111 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 1112 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 1113 matrix_element.rep_dict['dp_born_amps_decl'] = \ 1114 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1115 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1116 matrix_element.rep_dict['mp_born_amps_decl'] = \ 1117 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 1118 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 1119 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 1120 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 1121 1122 if writer: 1123 raise MadGraph5Error, 'Matrix output mode no longer supported.' 1124 1125 filename = 'loop_matrix.f' 1126 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1127 matrix_element, 1128 LoopFortranModel) 1129 1130 # Write out the proc_prefix in a file, this is quite handy 1131 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1132 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1133 proc_prefix_writer.close() 1134 1135 filename = 'check_sa.f' 1136 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1137 1138 filename = 'CT_interface.f' 1139 self.write_CT_interface(writers.FortranWriter(filename),\ 1140 matrix_element) 1141 1142 1143 1144 filename = 'improve_ps.f' 1145 calls = self.write_improve_ps(writers.FortranWriter(filename), 1146 matrix_element) 1147 1148 filename = 'loop_num.f' 1149 self.write_loop_num(writers.FortranWriter(filename),\ 1150 matrix_element,LoopFortranModel) 1151 1152 filename = 'mp_born_amps_and_wfs.f' 1153 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1154 matrix_element,LoopFortranModel) 1155 1156 # Extract number of external particles 1157 (nexternal, ninitial) = 
matrix_element.get_nexternal_ninitial() 1158 filename = 'nexternal.inc' 1159 self.write_nexternal_file(writers.FortranWriter(filename), 1160 nexternal, ninitial) 1161 1162 filename = 'process_info.inc' 1163 self.write_process_info_file(writers.FortranWriter(filename), 1164 matrix_element) 1165 return calls
1166
1167 - def write_process_info_file(self, writer, matrix_element):
1168 """A small structural function to write the include file specifying some 1169 process characteristics.""" 1170 1171 model = matrix_element.get('processes')[0].get('model') 1172 process_info = {} 1173 # The maximum spin of any particle connected (or directly running in) 1174 # any loop of this matrix element. This is important because there is 1175 # some limitation in the stability tests that can be performed when this 1176 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1177 # this regard. 1178 process_info['max_spin_connected_to_loop']=\ 1179 matrix_element.get_max_spin_connected_to_loop() 1180 1181 process_info['max_spin_external_particle']= max( 1182 model.get_particle(l.get('id')).get('spin') for l in 1183 matrix_element.get('processes')[0].get('legs')) 1184 1185 proc_include = \ 1186 """ 1187 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1188 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1189 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1190 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1191 """%process_info 1192 1193 writer.writelines(proc_include)
1194
1195 - def generate_subprocess_directory(self, matrix_element, fortran_model):
1196 """ To overload the default name for this function such that the correct 1197 function is used when called from the command interface """ 1198 1199 self.unique_id +=1 1200 return self.generate_loop_subprocess(matrix_element,fortran_model, 1201 unique_id=self.unique_id)
1202
    def write_check_sa(self, writer, matrix_element):
        """Writes out the steering code check_sa. In the optimized output
        mode, all the necessary entries in the replace_dictionary have
        already been set in write_loopmatrix because it is only there that
        one has access to the information about split orders."""
        replace_dict = copy.copy(matrix_element.rep_dict)
        # Placeholders possibly unset in the non-optimized mode default to
        # empty strings so the template substitution below cannot fail.
        for key in ['print_so_born_results', 'print_so_loop_results',
           'write_so_born_results', 'write_so_loop_results',
           'set_coupling_target']:
            if key not in replace_dict.keys():
                replace_dict[key] = ''

        # Pick the template matching whether the process has a born.
        if matrix_element.get('processes')[0].get('has_born'):
            file = open(os.path.join(self.template_dir,
                                     'check_sa.inc')).read()
        else:
            file = open(os.path.join(self.template_dir,
                                     'check_sa_loop_induced.inc')).read()
        file = file % replace_dict
        writer.writelines(file)

        # We can always write the f2py wrapper if present (in loop optimized
        # mode, it is).
        if not os.path.isfile(pjoin(self.template_dir, 'check_py.f.inc')):
            return

        file = open(os.path.join(self.template_dir,
                                 'check_py.f.inc')).read()

        if 'prefix' in self.cmd_options and \
                                self.cmd_options['prefix'] in ['int', 'proc']:
            replace_dict['prefix_routine'] = replace_dict['proc_prefix']
        else:
            replace_dict['prefix_routine'] = ''
        file = file % replace_dict
        # The wrapper is written next to check_sa.f, reusing the same
        # writer class (a FortranWriter here).
        new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f')
        new_writer = writer.__class__(new_path, 'w')
        new_writer.writelines(file)

        file = open(os.path.join(self.template_dir,
                                 'check_sa.py.inc')).read()
        # For now just put in an empty PS point but in the future, maybe
        # generate a valid one already here by default.
        curr_proc = matrix_element.get('processes')[0]
        random_PSpoint_python_formatted = \
"""# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input.
p= [[None,]*4]*%d""" % len(curr_proc.get('legs'))

        process_definition_string = \
                          curr_proc.nice_string().replace('Process:', '')
        file = file.format(random_PSpoint_python_formatted,
                           process_definition_string,
                           replace_dict['proc_prefix'].lower())
        new_path = writer.name.replace('check_sa.f', 'check_sa.py')
        new_writer = open(new_path, 'w')
        new_writer.writelines(file)
        # Make it executable
        os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1255
    def write_improve_ps(self, writer, matrix_element):
        """ Write out the improve_ps subroutines which modify the PS point
        given in input and slightly deform it to achieve exact onshellness on
        all external particles as well as perfect energy-momentum
        conservation"""
        replace_dict = copy.copy(matrix_element.rep_dict)

        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        replace_dict['ninitial'] = ninitial
        # NOTE(review): the last two external masses are dropped here —
        # presumably because the template treats them specially; confirm
        # against improve_ps.inc.
        mass_list = matrix_element.get_external_masses()[:-2]
        mp_variable_prefix = check_param_card.ParamCard.mp_prefix

        # Write the quadruple precision version of this routine only.
        replace_dict['real_format'] = replace_dict['real_mp_format']
        replace_dict['mp_prefix'] = 'MP_'
        replace_dict['exp_letter'] = 'e'
        replace_dict['mp_specifier'] = '_16'
        replace_dict['coupl_inc_name'] = 'mp_coupl.inc'
        # One MASSES(i)=<mp-prefixed mass> Fortran assignment per external
        # mass kept above.
        replace_dict['masses_def'] = '\n'.join(
            ['MASSES(%(i)d)=%(prefix)s%(m)s'
             % {'i': i + 1, 'm': m, 'prefix': mp_variable_prefix} for
             i, m in enumerate(mass_list)])
        file_mp = open(os.path.join(self.template_dir,
                                    'improve_ps.inc')).read()
        file_mp = file_mp % replace_dict
        #
        writer.writelines(file_mp)
1280
    def write_loop_num(self, writer, matrix_element, fortran_model):
        """ Create the file containing the core subroutine called by CutTools
        which contains the Helas calls building the loop"""

        # Nothing to do for an empty matrix element.
        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        # Set lowercase/uppercase Fortran code
        writers.FortranWriter.downcase = False

        file = open(os.path.join(self.template_dir, 'loop_num.inc')).read()

        replace_dict = copy.copy(matrix_element.rep_dict)

        loop_helas_calls = \
                  fortran_model.get_loop_amplitude_helas_calls(matrix_element)
        replace_dict['maxlcouplings'] = \
                                     matrix_element.find_max_loop_coupling()
        replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls)

        # The squaring is only necessary for the processes with born where
        # the sum over helicities is done before sending the numerator to CT.
        # (Fortran snippets below are emitted verbatim into loop_num.f.)
        dp_squaring_lines = ['DO I=1,NBORNAMPS',
          'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)',
          'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
          'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))', 'ENDDO']
        mp_squaring_lines = ['DO I=1,NBORNAMPS',
          'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)',
          'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
          'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))', 'ENDDO']
        if matrix_element.get('processes')[0].get('has_born'):
            replace_dict['dp_squaring'] = '\n'.join(dp_squaring_lines)
            replace_dict['mp_squaring'] = '\n'.join(mp_squaring_lines)
        else:
            # Loop-induced: no born to square against, just forward the
            # numerator buffer.
            replace_dict['dp_squaring'] = 'RES=BUFF'
            replace_dict['mp_squaring'] = 'QPRES=BUFF'

        # Prepend MP_ to all helas calls (in-place mutation of the list).
        self.turn_to_mp_calls(loop_helas_calls)
        replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls)

        file = file % replace_dict

        # With a writer, emit the file; without one, hand the text back so
        # the caller can aggregate it.
        if writer:
            writer.writelines(file)
        else:
            return file
1327
1328 - def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1329 """ Create the file CT_interface.f which contains the subroutine defining 1330 the loop HELAS-like calls along with the general interfacing subroutine. 1331 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1332 1333 files=[] 1334 1335 # First write CT_interface which interfaces MG5 with CutTools. 1336 replace_dict=copy.copy(matrix_element.rep_dict) 1337 1338 # We finalize CT result differently wether we used the built-in 1339 # squaring against the born. 1340 if matrix_element.get('processes')[0].get('has_born'): 1341 replace_dict['finalize_CT']='\n'.join([\ 1342 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1343 else: 1344 replace_dict['finalize_CT']='\n'.join([\ 1345 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1346 1347 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1348 1349 file = file % replace_dict 1350 files.append(file) 1351 1352 # Now collect the different kind of subroutines needed for the 1353 # loop HELAS-like calls. 1354 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1355 1356 for callkey in HelasLoopAmpsCallKeys: 1357 replace_dict=copy.copy(matrix_element.rep_dict) 1358 # Add to this dictionary all other attribute common to all 1359 # HELAS-like loop subroutines. 
1360 if matrix_element.get('processes')[0].get('has_born'): 1361 replace_dict['validh_or_nothing']=',validh' 1362 else: 1363 replace_dict['validh_or_nothing']='' 1364 # In the optimized output, the number of couplings in the loop is 1365 # not specified so we only treat it here if necessary: 1366 if len(callkey)>2: 1367 replace_dict['ncplsargs']=callkey[2] 1368 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1369 replace_dict['cplsargs']=cplsargs 1370 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1371 replace_dict['cplsdecl']=cplsdecl 1372 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1373 replace_dict['mp_cplsdecl']=mp_cplsdecl 1374 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1375 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1376 for i in range(1,callkey[2]+1)]) 1377 replace_dict['cplset']=cplset 1378 1379 replace_dict['nloopline']=callkey[0] 1380 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1381 replace_dict['wfsargs']=wfsargs 1382 # We don't pass the multiple precision mass in the optimized_output 1383 if not optimized_output: 1384 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1385 else: 1386 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1387 replace_dict['margs']=margs 1388 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1389 replace_dict['wfsargsdecl']=wfsargsdecl 1390 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1391 replace_dict['margsdecl']=margsdecl 1392 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1393 replace_dict['mp_margsdecl']=mp_margsdecl 1394 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1395 i in range(1,callkey[1]+1)]) 1396 replace_dict['weset']=weset 1397 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1398 replace_dict['weset']=weset 1399 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1400 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for 
\ 1401 i in range(2,callkey[0]+1)]) 1402 replace_dict['mset']=mset 1403 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1404 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1405 mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1406 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1407 i in range(3,callkey[0]+3)]) 1408 replace_dict['mset2']=mset2 1409 replace_dict['nwfsargs'] = callkey[1] 1410 if callkey[0]==callkey[1]: 1411 replace_dict['nwfsargs_header'] = "" 1412 replace_dict['pairingargs']="" 1413 replace_dict['pairingdecl']="" 1414 pairingset="""DO I=1,NLOOPLINE 1415 PAIRING(I)=1 1416 ENDDO 1417 """ 1418 replace_dict['pairingset']=pairingset 1419 else: 1420 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1421 pairingargs="".join([("P"+str(i)+", ") for i in \ 1422 range(1,callkey[0]+1)]) 1423 replace_dict['pairingargs']=pairingargs 1424 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1425 range(1,callkey[0]+1)])[:-2] 1426 replace_dict['pairingdecl']=pairingdecl 1427 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1428 i in range(1,callkey[0]+1)]) 1429 replace_dict['pairingset']=pairingset 1430 1431 file = open(os.path.join(self.template_dir,\ 1432 'helas_loop_amplitude.inc')).read() 1433 file = file % replace_dict 1434 files.append(file) 1435 1436 file="\n".join(files) 1437 1438 if writer: 1439 writer.writelines(file,context=self.get_context(matrix_element)) 1440 else: 1441 return file
1442 1443 # Helper function to split HELAS CALLS in dedicated subroutines placed 1444 # in different files.
1445 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1446 helas_calls, entry_name, bunch_name,n_helas=2000, 1447 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1448 continue_label = 1000, momenta_array_name='P', 1449 context={}):
1450 """ Finish the code generation with splitting. 1451 Split the helas calls in the argument helas_calls into bunches of 1452 size n_helas and place them in dedicated subroutine with name 1453 <bunch_name>_i. Also setup the corresponding calls to these subroutine 1454 in the replace_dict dictionary under the entry entry_name. 1455 The context specified will be forwarded to the the fileWriter.""" 1456 helascalls_replace_dict=copy.copy(replace_dict) 1457 helascalls_replace_dict['bunch_name']=bunch_name 1458 helascalls_files=[] 1459 for i, k in enumerate(range(0, len(helas_calls), n_helas)): 1460 helascalls_replace_dict['bunch_number']=i+1 1461 helascalls_replace_dict['helas_calls']=\ 1462 '\n'.join(helas_calls[k:k + n_helas]) 1463 helascalls_replace_dict['required_so_broadcaster']=\ 1464 required_so_broadcaster 1465 helascalls_replace_dict['continue_label']=continue_label 1466 new_helascalls_file = open(os.path.join(self.template_dir,\ 1467 template_name)).read() 1468 new_helascalls_file = new_helascalls_file % helascalls_replace_dict 1469 helascalls_files.append(new_helascalls_file) 1470 # Setup the call to these HELASCALLS subroutines in loop_matrix.f 1471 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 1472 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 1473 for a in range(len(helascalls_files))] 1474 replace_dict[entry_name]='\n'.join(helascalls_calls) 1475 if writer: 1476 for i, helascalls_file in enumerate(helascalls_files): 1477 filename = '%s_%d.f'%(bunch_name,i+1) 1478 writers.FortranWriter(filename).writelines(helascalls_file, 1479 context=context) 1480 else: 1481 masterfile='\n'.join([masterfile,]+helascalls_files) 1482 1483 return masterfile
1484
    def write_loopmatrix(self, writer, matrix_element, fortran_model,
                         noSplit=False):
        """Create the loop_matrix.f file.
        Returns the number of loop helas calls (and, when no writer is given,
        also the generated text)."""

        # Nothing to write for an empty matrix element.
        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        # Set lowercase/uppercase Fortran code

        writers.FortranWriter.downcase = False

        replace_dict = copy.copy(matrix_element.rep_dict)

        # Extract overall denominator
        # Averaging initial state color, spin, and identical FS particles
        den_factor_line = self.get_den_factor_line(matrix_element)
        replace_dict['den_factor_line'] = den_factor_line
        # When the user asks for the polarized matrix element we must
        # multiply back by the helicity averaging factor
        replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
        replace_dict['beamone_helavgfactor'], \
        replace_dict['beamtwo_helavgfactor'] = \
                                    matrix_element.get_beams_hel_avg_factor()

        # These entries are specific for the output for loop-induced
        # processes.  Also sets here the details of the squaring of the loop
        # amplitudes with the born or the loop ones.
        # NOTE(review): the embedded Fortran snippets below are emitted
        # verbatim; their internal layout is normalized by FortranWriter.
        if not matrix_element.get('processes')[0].get('has_born'):
            replace_dict['compute_born'] = \
"""C There is of course no born for loop induced processes
ANS(0)=0.0d0
"""
            replace_dict['set_reference'] = '\n'.join([
 'C For loop-induced, the reference for comparison is set later' +
 ' from the total contribution of the previous PS point considered.',
 'C But you can edit here the value to be used for the first PS point.',
 'if (NPSPOINTS.eq.0) then', 'ref=1.0d-50', 'else',
 'ref=nextRef/DBLE(NPSPOINTS)', 'endif'])
            replace_dict['loop_induced_setup'] = '\n'.join([
 'HELPICKED_BU=HELPICKED', 'HELPICKED=H', 'MP_DONE=.FALSE.',
 'IF(SKIPLOOPEVAL) THEN', 'GOTO 1227', 'ENDIF'])
            replace_dict['loop_induced_finalize'] = \
("""DO I=NCTAMPS+1,NLOOPAMPS
IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN
WRITE(*,*) '##W03 WARNING Contribution ',I
WRITE(*,*) ' is unstable for helicity ',H
ENDIF
C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN
C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.'
C WRITE(*,*) 'Finite contribution = ',AMPL(1,I)
C WRITE(*,*) 'single pole contribution = ',AMPL(2,I)
C WRITE(*,*) 'double pole contribution = ',AMPL(3,I)
C ENDIF
ENDDO
1227 CONTINUE
HELPICKED=HELPICKED_BU""") % replace_dict
            replace_dict['loop_helas_calls'] = ""
            replace_dict['nctamps_or_nloopamps'] = 'nloopamps'
            replace_dict['nbornamps_or_nloopamps'] = 'nloopamps'
            replace_dict['squaring'] = \
"""ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J)))
IF (J.EQ.1) THEN
ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I))
ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I))
ENDIF"""
        else:
            replace_dict['compute_born'] = \
"""C Compute the born, for a specific helicity if asked so.
call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0))
""" % matrix_element.rep_dict
            replace_dict['set_reference'] = \
"""C We chose to use the born evaluation for the reference
call %(proc_prefix)ssmatrix(p,ref)""" % matrix_element.rep_dict
            replace_dict['loop_induced_helas_calls'] = ""
            replace_dict['loop_induced_finalize'] = ""
            replace_dict['loop_induced_setup'] = ""
            replace_dict['nctamps_or_nloopamps'] = 'nctamps'
            replace_dict['nbornamps_or_nloopamps'] = 'nbornamps'
            replace_dict['squaring'] = '\n'.join(['DO K=1,3',
                 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))',
                 'ENDDO'])

        # Write a dummy nsquaredSO.inc which is used in the default
        # loop_matrix.f code (even though it does not support split orders
        # evals) just to comply with the syntax expected from the external
        # code using MadLoop.
        writers.FortranWriter('nsquaredSO.inc').writelines(
"""INTEGER NSQUAREDSO
PARAMETER (NSQUAREDSO=0)""")

        # Actualize results from the loops computed. Only necessary for
        # processes with a born.
        actualize_ans = []
        if matrix_element.get('processes')[0].get('has_born'):
            actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS")
            actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)" % (i, i, i)
                                 for i in range(1, 4))
            actualize_ans.append(
             "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN")
            actualize_ans.append(
             "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'")
            actualize_ans.extend(["ENDIF", "ENDDO"])
            replace_dict['actualize_ans'] = '\n'.join(actualize_ans)
        else:
            replace_dict['actualize_ans'] = \
("""C We add five powers to the reference value to loosen a bit the vanishing pole check.
C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN
C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.'
C WRITE(*,*) 'Finite contribution = ',ANS(1)
C WRITE(*,*) 'single pole contribution = ',ANS(2)
C WRITE(*,*) 'double pole contribution = ',ANS(3)
C ENDIF""") % replace_dict

        # Write out the color matrix (numerators and denominators in two
        # separate resource files, read back by MadLoop at run time).
        (CMNum, CMDenom) = self.get_color_matrix(matrix_element)
        CMWriter = open(pjoin('..', 'MadLoop5_resources',
              '%(proc_prefix)sColorNumFactors.dat'
              % matrix_element.rep_dict), 'w')
        for ColorLine in CMNum:
            CMWriter.write(' '.join(['%d' % C for C in ColorLine]) + '\n')
        CMWriter.close()
        CMWriter = open(pjoin('..', 'MadLoop5_resources',
              '%(proc_prefix)sColorDenomFactors.dat'
              % matrix_element.rep_dict), 'w')
        for ColorLine in CMDenom:
            CMWriter.write(' '.join(['%d' % C for C in ColorLine]) + '\n')
        CMWriter.close()

        # Write out the helicity configurations
        HelConfigs = matrix_element.get_helicity_matrix()
        HelConfigWriter = open(pjoin('..', 'MadLoop5_resources',
              '%(proc_prefix)sHelConfigs.dat' % matrix_element.rep_dict), 'w')
        for HelConfig in HelConfigs:
            HelConfigWriter.write(' '.join(['%d' % H for H in HelConfig])
                                  + '\n')
        HelConfigWriter.close()

        # Extract helas calls
        loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(
                                                                matrix_element)
        # The proc_prefix must be replaced
        loop_amp_helas_calls = [lc % matrix_element.rep_dict
                                for lc in loop_amp_helas_calls]

        born_ct_helas_calls, UVCT_helas_calls = \
                          fortran_model.get_born_ct_helas_calls(matrix_element)
        # In the default output, we do not need to separate these two kind of
        # contributions
        born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls
        file = open(os.path.join(self.template_dir,
                                 'loop_matrix_standalone.inc')).read()

        if matrix_element.get('processes')[0].get('has_born'):
            toBeRepaced = 'loop_helas_calls'
        else:
            toBeRepaced = 'loop_induced_helas_calls'

        # Decide here whether we need to split the loop_matrix.f file or not.
        if (not noSplit and (len(matrix_element.get_all_amplitudes()) > 1000)):
            file = self.split_HELASCALLS(writer, replace_dict,
                       'helas_calls_split.inc', file, born_ct_helas_calls,
                       'born_ct_helas_calls', 'helas_calls_ampb')
            file = self.split_HELASCALLS(writer, replace_dict,
                       'helas_calls_split.inc', file, loop_amp_helas_calls,
                       toBeRepaced, 'helas_calls_ampl')
        else:
            replace_dict['born_ct_helas_calls'] = \
                                             '\n'.join(born_ct_helas_calls)
            replace_dict[toBeRepaced] = '\n'.join(loop_amp_helas_calls)

        file = file % replace_dict

        # Count the actual loop calls among the generated helas calls.
        # (Python-2 idiom: filter() returns a list here.)
        loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*')
        n_loop_calls = len(filter(lambda call:
              not loop_calls_finder.match(call) is None,
              loop_amp_helas_calls))
        if writer:
            # Write the file
            writer.writelines(file)
            return n_loop_calls
        else:
            # Return it to be written along with the others
            return n_loop_calls, file
1663
1664 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1665 """Create the born_matrix.f file for the born process as for a standard 1666 tree-level computation.""" 1667 1668 if not matrix_element.get('processes') or \ 1669 not matrix_element.get('diagrams'): 1670 return 0 1671 1672 if not isinstance(writer, writers.FortranWriter): 1673 raise writers.FortranWriter.FortranWriterError(\ 1674 "writer not FortranWriter") 1675 1676 # For now, we can use the exact same treatment as for tree-level 1677 # computations by redefining here a regular HelasMatrixElementf or the 1678 # born process. 1679 # It is important to make a deepcopy, as we don't want any possible 1680 # treatment on the objects of the bornME to have border effects on 1681 # the content of the LoopHelasMatrixElement object. 1682 bornME = helas_objects.HelasMatrixElement() 1683 for prop in bornME.keys(): 1684 bornME.set(prop,copy.deepcopy(matrix_element.get(prop))) 1685 bornME.set('base_amplitude',None,force=True) 1686 bornME.set('diagrams',copy.deepcopy(\ 1687 matrix_element.get_born_diagrams())) 1688 bornME.set('color_basis',copy.deepcopy(\ 1689 matrix_element.get('born_color_basis'))) 1690 bornME.set('color_matrix',copy.deepcopy(\ 1691 color_amp.ColorMatrix(bornME.get('color_basis')))) 1692 # This is to decide wether once to reuse old wavefunction to store new 1693 # ones (provided they are not used further in the code.) 1694 bornME.optimization = True 1695 return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4( 1696 writer, bornME, fortran_model, 1697 proc_prefix=matrix_element.rep_dict['proc_prefix'])
1698
1699 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1700 noSplit=False):
1701 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1702 computes just the external wavefunction and born amplitudes in 1703 multiple precision. """ 1704 1705 if not matrix_element.get('processes') or \ 1706 not matrix_element.get('diagrams'): 1707 return 0 1708 1709 replace_dict = copy.copy(matrix_element.rep_dict) 1710 1711 # For the wavefunction copy, check what suffix is needed for the W array 1712 if matrix_element.get('processes')[0].get('has_born'): 1713 replace_dict['h_w_suffix']=',H' 1714 else: 1715 replace_dict['h_w_suffix']='' 1716 1717 # Extract helas calls 1718 born_amps_and_wfs_calls , uvct_amp_calls = \ 1719 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1720 # In the default output, these two kind of contributions do not need to 1721 # be differentiated 1722 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1723 1724 # Turn these HELAS calls to the multiple-precision version of the HELAS 1725 # subroutines. 1726 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1727 1728 file = open(os.path.join(self.template_dir,\ 1729 'mp_born_amps_and_wfs.inc')).read() 1730 # Decide here wether we need to split the loop_matrix.f file or not. 1731 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1732 file=self.split_HELASCALLS(writer,replace_dict,\ 1733 'mp_helas_calls_split.inc',file,\ 1734 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1735 'mp_helas_calls') 1736 else: 1737 replace_dict['born_amps_and_wfs_calls']=\ 1738 '\n'.join(born_amps_and_wfs_calls) 1739 1740 file = file % replace_dict 1741 if writer: 1742 # Write the file 1743 writer.writelines(file) 1744 else: 1745 # Return it to be written along with the others 1746 return file 1747 1748 #=============================================================================== 1749 # LoopProcessOptimizedExporterFortranSA 1750 #=============================================================================== 1751
class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
    """Class to take care of exporting a set of loop matrix elements in the
    Fortran format which exploits the Pozzorini method of representing
    the loop numerators as polynomial to render its evaluations faster."""

    # Directory holding the Fortran templates specific to the optimized output.
    template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
    # The option below controls whether one wants to group together in one single
    # CutTools/TIR call the loops with same denominator structure.
    forbid_loop_grouping = False

    # List of potential TIR library one wants to link to.
    # Golem and Samurai will typically get obtained from gosam_contrib
    # which might also contain a version of ninja. We must therefore
    # make sure that ninja appears first in the list of -L because
    # it is the tool for which the user is most susceptible of
    # using a standalone version independent of gosam_contrib.
    all_tir=['pjfry','iregi','ninja','golem','samurai','collier']
1770 - def __init__(self, dir_path = "", opt=None):
1771 """Initiate the LoopProcessOptimizedExporterFortranSA with directory 1772 information on where to find all the loop-related source files, 1773 like CutTools and TIR""" 1774 1775 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt) 1776 1777 # TIR available ones 1778 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True, 1779 'samurai':True,'ninja':True,'collier':True} 1780 1781 for tir in self.all_tir: 1782 tir_dir="%s_dir"%tir 1783 if tir_dir in self.opt and not self.opt[tir_dir] is None: 1784 # Make sure to defer the 'local path' to the current MG5aMC root. 1785 tir_path = self.opt[tir_dir].strip() 1786 if tir_path.startswith('.'): 1787 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path)) 1788 setattr(self,tir_dir,tir_path) 1789 else: 1790 setattr(self,tir_dir,'')
1791
1792 - def copy_template(self, model):
1793 """Additional actions needed to setup the Template. 1794 """ 1795 1796 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1797 1798 self.loop_optimized_additional_template_setup()
1799
1800 - def get_context(self,matrix_element, **opts):
1801 """ Additional contextual information which needs to be created for 1802 the optimized output.""" 1803 1804 context = LoopProcessExporterFortranSA.get_context(self, matrix_element, 1805 **opts) 1806 1807 # For now assume Ninja always supports quadruple precision 1808 try: 1809 context['ninja_supports_quad_prec'] = \ 1810 misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir')) 1811 except AttributeError: 1812 context['ninja_supports_quad_prec'] = False 1813 1814 for tir in self.all_tir: 1815 context['%s_available'%tir]=self.tir_available_dict[tir] 1816 # safety check 1817 if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']: 1818 raise MadGraph5Error,"%s was not a TIR currently interfaced."%tir_name 1819 1820 return context
1821
1823 """ Perform additional actions specific for this class when setting 1824 up the template with the copy_template function.""" 1825 1826 # We must link the TIR to the Library folder of the active Template 1827 link_tir_libs=[] 1828 tir_libs=[] 1829 tir_include=[] 1830 1831 for tir in self.all_tir: 1832 tir_dir="%s_dir"%tir 1833 libpath=getattr(self,tir_dir) 1834 libname="lib%s.a"%tir 1835 tir_name=tir 1836 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1837 libpath,libname,tir_name=tir_name) 1838 if libpath != "": 1839 if tir in ['ninja','pjfry','golem','samurai','collier']: 1840 # It is cleaner to use the original location of the libraries 1841 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1842 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1843 # For Ninja, we must also link against OneLoop. 1844 if tir in ['ninja']: 1845 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1846 for ext in ['a','dylib','so']): 1847 raise MadGraph5Error( 1848 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. 
Please place a symlink to it there."%libpath) 1849 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1850 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1851 if tir in ['ninja','golem', 'samurai','collier']: 1852 trgt_path = pjoin(os.path.dirname(libpath),'include') 1853 if os.path.isdir(trgt_path): 1854 to_include = misc.find_includes_path(trgt_path, 1855 self.include_names[tir]) 1856 else: 1857 to_include = None 1858 # Special possible location for collier 1859 if to_include is None and tir=='collier': 1860 to_include = misc.find_includes_path( 1861 pjoin(libpath,'modules'),self.include_names[tir]) 1862 if to_include is None: 1863 logger.error( 1864 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1865 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1866 to_include = '<Not_found_define_it_yourself>' 1867 tir_include.append('-I %s'%str(to_include)) 1868 # To be able to easily compile a MadLoop library using 1869 # makefiles built outside of the MG5_aMC framework 1870 # (such as what is done with the Sherpa interface), we 1871 # place here an easy handle on the golem includes 1872 name_map = {'golem':'golem95','samurai':'samurai', 1873 'ninja':'ninja','collier':'collier'} 1874 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1875 name='%s_include'%name_map[tir],abspath=True) 1876 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1877 name='%s_lib'%name_map[tir],abspath=True) 1878 else : 1879 link_tir_libs.append('-l%s'%tir) 1880 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1881 1882 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1883 'MadLoop_makefile_definitions') 1884 if os.path.isfile(MadLoop_makefile_definitions): 1885 os.remove(MadLoop_makefile_definitions) 1886 1887 calls = self.write_loop_makefile_definitions( 1888 writers.MakefileWriter(MadLoop_makefile_definitions), 1889 link_tir_libs,tir_libs, tir_include=tir_include) 1890 
1891 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1892 # COLLIER. 1893 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1894 "SubProcesses","MadLoopCommons.inc")).read() 1895 writer = writers.FortranWriter(os.path.join(self.dir_path, 1896 "SubProcesses","MadLoopCommons.f")) 1897 writer.writelines(MadLoopCommon%{ 1898 'print_banner_commands':self.MadLoop_banner}, context={ 1899 'collier_available':self.tir_available_dict['collier']}) 1900 writer.close()
1901 1913 1914 2042
2043 - def set_group_loops(self, matrix_element):
2044 """ Decides whether we must group loops or not for this matrix element""" 2045 2046 # Decide if loops sharing same denominator structures have to be grouped 2047 # together or not. 2048 if self.forbid_loop_grouping: 2049 self.group_loops = False 2050 else: 2051 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 2052 and matrix_element.get('processes')[0].get('has_born') 2053 2054 return self.group_loops
2055
2056 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2057 """create the global information for loops""" 2058 2059 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 2060 cmdhistory, MG5options, outputflag) 2061 self.write_global_specs(matrix_element)
2062 2063 2064
    def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
                        group_number = None, proc_id = None, config_map = None):
        """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f
        and loop_num.f only but with the optimized FortranModel.
        The arguments group_number and proc_id are just for the LoopInduced
        output with MadEvent and only used in get_ME_identifier."""

        # Warn the user that the 'matrix' output where all relevant code is
        # put together in a single file is not supported in this loop output.
        if writer:
            raise MadGraph5Error, 'Matrix output mode no longer supported.'

        # The optimized output requires a UFO Fortran helas call writer.
        if not isinstance(fortran_model,\
          helas_call_writers.FortranUFOHelasCallWriter):
            raise MadGraph5Error, 'The optimized loop fortran output can only'+\
                                             ' work with a UFO Fortran model'
        OptimizedFortranModel=\
          helas_call_writers.FortranUFOHelasCallWriterOptimized(\
          fortran_model.get('model'),False)

        # Loop-induced processes always use color flows, irrespective of the
        # user option.
        if not matrix_element.get('processes')[0].get('has_born') and \
                                                  not self.compute_color_flows:
            logger.debug("Color flows will be employed despite the option"+\
              " 'loop_color_flows' being set to False because it is necessary"+\
              " for optimizations.")

        # Compute the analytical information of the loop wavefunctions in the
        # loop helas matrix elements using the cached aloha model to reuse
        # as much as possible the aloha computations already performed for
        # writing out the aloha fortran subroutines.
        matrix_element.compute_all_analytic_information(
          self.get_aloha_model(matrix_element.get('processes')[0].get('model')))

        self.set_group_loops(matrix_element)

        # Initialize a general replacement dictionary with entries common to
        # many files generated here.
        matrix_element.rep_dict = LoopProcessExporterFortranSA.\
                            generate_general_replace_dict(self, matrix_element,
                               group_number = group_number, proc_id = proc_id)

        # and those specific to the optimized output
        self.set_optimized_output_specific_replace_dict_entries(matrix_element)

        # Create the necessary files for the loop matrix element subroutine
        proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w')
        proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix'])
        proc_prefix_writer.close()

        filename = 'loop_matrix.f'
        calls = self.write_loopmatrix(writers.FortranWriter(filename),
                                      matrix_element,
                                      OptimizedFortranModel)

        filename = 'check_sa.f'
        self.write_check_sa(writers.FortranWriter(filename),matrix_element)

        filename = 'polynomial.f'
        # NOTE(review): 'calls' is re-assigned here and again just below, so
        # the value finally returned comes from write_improve_ps rather than
        # from write_loopmatrix -- confirm this is the intended return value.
        calls = self.write_polynomial_subroutines(
                                          writers.FortranWriter(filename),
                                          matrix_element)

        filename = 'improve_ps.f'
        calls = self.write_improve_ps(writers.FortranWriter(filename),
                                      matrix_element)

        filename = 'CT_interface.f'
        self.write_CT_interface(writers.FortranWriter(filename),\
                                matrix_element)

        filename = 'TIR_interface.f'
        self.write_TIR_interface(writers.FortranWriter(filename),
                                 matrix_element)

        # The GOLEM and COLLIER interfaces are only written when the
        # corresponding library is available.
        if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']:
            filename = 'GOLEM_interface.f'
            self.write_GOLEM_interface(writers.FortranWriter(filename),
                                       matrix_element)

        if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']:
            filename = 'COLLIER_interface.f'
            self.write_COLLIER_interface(writers.FortranWriter(filename),
                                         matrix_element)

        filename = 'loop_num.f'
        self.write_loop_num(writers.FortranWriter(filename),\
                            matrix_element,OptimizedFortranModel)

        filename = 'mp_compute_loop_coefs.f'
        self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\
                                         matrix_element,OptimizedFortranModel)

        if self.get_context(matrix_element)['ComputeColorFlows']:
            filename = 'compute_color_flows.f'
            self.write_compute_color_flows(writers.FortranWriter(filename),
                                   matrix_element, config_map = config_map)

        # Extract number of external particles
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
        filename = 'nexternal.inc'
        self.write_nexternal_file(writers.FortranWriter(filename),
                                  nexternal, ninitial)

        # Write general process information
        filename = 'process_info.inc'
        self.write_process_info_file(writers.FortranWriter(filename),
                                     matrix_element)

        if self.get_context(matrix_element)['TIRCaching']:
            filename = 'tir_cache_size.inc'
            self.write_tir_cache_size_include(writers.FortranWriter(filename))

        return calls
2179
2180 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
2181 """ Specify the entries of the replacement dictionary which are specific 2182 to the optimized output and only relevant to it (the more general entries 2183 are set in the the mother class LoopProcessExporterFortranSA.""" 2184 2185 max_loop_rank=matrix_element.get_max_loop_rank() 2186 matrix_element.rep_dict['maxrank']=max_loop_rank 2187 matrix_element.rep_dict['loop_max_coefs']=\ 2188 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank) 2189 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank() 2190 matrix_element.rep_dict['vertex_max_coefs']=\ 2191 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank) 2192 2193 matrix_element.rep_dict['nloopwavefuncs']=\ 2194 matrix_element.get_number_of_loop_wavefunctions() 2195 max_spin=matrix_element.get_max_loop_particle_spin() 2196 2197 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16 2198 matrix_element.rep_dict['nloops']=len(\ 2199 [1 for ldiag in matrix_element.get_loop_diagrams() for \ 2200 lamp in ldiag.get_loop_amplitudes()]) 2201 2202 if self.set_group_loops(matrix_element): 2203 matrix_element.rep_dict['nloop_groups']=\ 2204 len(matrix_element.get('loop_groups')) 2205 else: 2206 matrix_element.rep_dict['nloop_groups']=\ 2207 matrix_element.rep_dict['nloops']
2208
2209 - def write_loop_num(self, writer, matrix_element,fortran_model):
2210 """ Create the file containing the core subroutine called by CutTools 2211 which contains the Helas calls building the loop""" 2212 2213 replace_dict=copy.copy(matrix_element.rep_dict) 2214 2215 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2216 file = file % replace_dict 2217 writer.writelines(file,context=self.get_context(matrix_element))
2218
2219 - def write_CT_interface(self, writer, matrix_element):
2220 """ We can re-use the mother one for the loop optimized output.""" 2221 LoopProcessExporterFortranSA.write_CT_interface(\ 2222 self, writer, matrix_element,optimized_output=True)
2223
    def write_TIR_interface(self, writer, matrix_element):
        """ Create the file TIR_interface.f which does NOT contain the subroutine
        defining the loop HELAS-like calls along with the general interfacing
        subroutine. """

        # First write TIR_interface which interfaces MG5 with TIR.
        replace_dict=copy.copy(matrix_element.rep_dict)

        file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read()

        # Check which loops have an Higgs effective vertex so as to correctly
        # implement CutTools limitation
        loop_groups = matrix_element.get('loop_groups')
        # One flag per loop group, set below.
        has_HEFT_vertex = [False]*len(loop_groups)
        for i, (denom_structure, loop_amp_list) in enumerate(loop_groups):
            for lamp in loop_amp_list:
                # Walk the chain of loop mothers of each amplitude's final
                # loop wavefunction, looking for an HEFT-like vertex.
                final_lwf = lamp.get_final_loop_wavefunction()
                while not final_lwf is None:
                    # We define here an HEFT vertex as any vertex built up from
                    # only massless vectors and scalars (at least one of each)
                    scalars = len([1 for wf in final_lwf.get('mothers') if
                                                           wf.get('spin')==1])
                    vectors = len([1 for wf in final_lwf.get('mothers') if
                                 wf.get('spin')==3 and wf.get('mass')=='ZERO'])
                    if scalars>=1 and vectors>=1 and \
                                 scalars+vectors == len(final_lwf.get('mothers')):
                        has_HEFT_vertex[i] = True
                        break
                    final_lwf = final_lwf.get_loop_mother()
                else:
                    # The while loop exhausted without finding an HEFT
                    # vertex: try the next amplitude of this group.
                    continue
                # The inner 'break' fired (HEFT vertex found): no need to
                # inspect the remaining amplitudes of this group.
                break

        # Format the flags as Fortran DATA statements, 9 entries per line.
        has_HEFT_list = []
        chunk_size = 9
        for k in xrange(0, len(has_HEFT_vertex), chunk_size):
            has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \
                (k + 1, min(k + chunk_size, len(has_HEFT_vertex)),
                 ','.join(['.TRUE.' if l else '.FALSE.' for l in
                           has_HEFT_vertex[k:k + chunk_size]])))
        replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list)

        file = file % replace_dict

        # Append the polynomial-coefficient mappings for the TIR tools that
        # need one, when those tools are available.
        FPR = q_polynomial.FortranPolynomialRoutines(
            replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\
            sub_prefix=replace_dict['proc_prefix'])
        if self.tir_available_dict['pjfry']:
            file += '\n\n'+FPR.write_pjfry_mapping()
        if self.tir_available_dict['iregi']:
            file += '\n\n'+FPR.write_iregi_mapping()

        if writer:
            writer.writelines(file,context=self.get_context(matrix_element))
        else:
            # Return the text to be written along with the other files.
            return file
2280
2281 - def write_COLLIER_interface(self, writer, matrix_element):
2282 """ Create the file COLLIER_interface.f""" 2283 2284 # First write GOLEM_interface which interfaces MG5 with TIR. 2285 replace_dict=copy.copy(matrix_element.rep_dict) 2286 2287 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read() 2288 2289 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2290 coef_format=replace_dict['complex_dp_format'],\ 2291 sub_prefix=replace_dict['proc_prefix']) 2292 map_definition = [] 2293 collier_map = FPR.get_COLLIER_mapping() 2294 2295 chunk_size = 10 2296 for map_name, indices_list in \ 2297 [('COEFMAP_ZERO',[c[0] for c in collier_map]), 2298 ('COEFMAP_ONE',[c[1] for c in collier_map]), 2299 ('COEFMAP_TWO',[c[2] for c in collier_map]), 2300 ('COEFMAP_THREE',[c[3] for c in collier_map])]: 2301 for k in xrange(0, len(indices_list), chunk_size): 2302 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \ 2303 (map_name,k, min(k + chunk_size, len(indices_list))-1, 2304 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size]))) 2305 2306 replace_dict['collier_coefmap'] = '\n'.join(map_definition) 2307 2308 file = file % replace_dict 2309 2310 if writer: 2311 writer.writelines(file,context=self.get_context(matrix_element)) 2312 else: 2313 return file
2314
2315 - def write_GOLEM_interface(self, writer, matrix_element):
2316 """ Create the file GOLEM_interface.f which does NOT contain the subroutine 2317 defining the loop HELAS-like calls along with the general interfacing 2318 subroutine. """ 2319 2320 # First write GOLEM_interface which interfaces MG5 with TIR. 2321 replace_dict=copy.copy(matrix_element.rep_dict) 2322 2323 # We finalize TIR result differently wether we used the built-in 2324 # squaring against the born. 2325 if not self.get_context(matrix_element)['AmplitudeReduction']: 2326 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 2327 else: 2328 replace_dict['loop_induced_sqsoindex']='' 2329 2330 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 2331 2332 file = file % replace_dict 2333 2334 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2335 coef_format=replace_dict['complex_dp_format'],\ 2336 sub_prefix=replace_dict['proc_prefix']) 2337 2338 file += '\n\n'+FPR.write_golem95_mapping() 2339 2340 if writer: 2341 writer.writelines(file,context=self.get_context(matrix_element)) 2342 else: 2343 return file
2344
2345 - def write_polynomial_subroutines(self,writer,matrix_element):
2346 """ Subroutine to create all the subroutines relevant for handling 2347 the polynomials representing the loop numerator """ 2348 2349 # First create 'loop_max_coefs.inc' 2350 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2351 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2352 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2353 %matrix_element.rep_dict) 2354 2355 # Then coef_specs directly in DHELAS if it does not exist already 2356 # 'coef_specs.inc'. If several processes exported different files there, 2357 # it is fine because the overall maximum value will overwrite it in the 2358 # end 2359 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2360 if not os.path.isfile(coef_specs_path): 2361 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2362 IncWriter.writelines("""INTEGER MAXLWFSIZE 2363 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2364 INTEGER VERTEXMAXCOEFS 2365 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2366 %matrix_element.rep_dict) 2367 IncWriter.close() 2368 2369 # List of all subroutines to place there 2370 subroutines=[] 2371 2372 # Start from the routine in the template 2373 replace_dict = copy.copy(matrix_element.rep_dict) 2374 2375 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2376 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2377 # The double precision version of the basic polynomial routines, such as 2378 # create_loop_coefs 2379 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2380 replace_dict['real_format'] = replace_dict['real_dp_format'] 2381 replace_dict['mp_prefix'] = '' 2382 replace_dict['kind'] = 8 2383 replace_dict['zero_def'] = '0.0d0' 2384 replace_dict['one_def'] = '1.0d0' 2385 dp_routine = dp_routine % replace_dict 2386 # The quadruple precision version of the basic polynomial routines 2387 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2388 replace_dict['real_format'] = 
replace_dict['real_mp_format'] 2389 replace_dict['mp_prefix'] = 'MP_' 2390 replace_dict['kind'] = 16 2391 replace_dict['zero_def'] = '0.0e0_16' 2392 replace_dict['one_def'] = '1.0e0_16' 2393 mp_routine = mp_routine % replace_dict 2394 subroutines.append(dp_routine) 2395 subroutines.append(mp_routine) 2396 2397 # Initialize the polynomial routine writer 2398 poly_writer=q_polynomial.FortranPolynomialRoutines( 2399 matrix_element.get_max_loop_rank(), 2400 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2401 sub_prefix=replace_dict['proc_prefix'], 2402 proc_prefix=replace_dict['proc_prefix'], 2403 mp_prefix='') 2404 # Write the polynomial constant module common to all 2405 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2406 2407 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2408 matrix_element.get_max_loop_rank(), 2409 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2410 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2411 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2412 # The eval subroutine 2413 subroutines.append(poly_writer.write_polynomial_evaluator()) 2414 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2415 # The add coefs subroutine 2416 subroutines.append(poly_writer.write_add_coefs()) 2417 subroutines.append(mp_poly_writer.write_add_coefs()) 2418 # The merging one for creating the loop coefficients 2419 subroutines.append(poly_writer.write_wl_merger()) 2420 subroutines.append(mp_poly_writer.write_wl_merger()) 2421 for wl_update in matrix_element.get_used_wl_updates(): 2422 # We pick here the most appropriate way of computing the 2423 # tensor product depending on the rank of the two tensors. 
2424 # The various choices below come out from a careful comparison of 2425 # the different methods using the valgrind profiler 2426 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0: 2427 # If any of the rank is 0, or if they are both equal to 1, 2428 # then we are better off using the full expanded polynomial, 2429 # and let the compiler optimize it. 2430 subroutines.append(poly_writer.write_expanded_wl_updater(\ 2431 wl_update[0],wl_update[1])) 2432 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\ 2433 wl_update[0],wl_update[1])) 2434 elif wl_update[0] >= wl_update[1]: 2435 # If the loop polynomial is larger then we will filter and loop 2436 # over the vertex coefficients first. The smallest product for 2437 # which the routines below could be used is then 2438 # loop_rank_2 x vertex_rank_1 2439 subroutines.append(poly_writer.write_compact_wl_updater(\ 2440 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2441 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2442 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2443 else: 2444 # This happens only when the rank of the updater (vertex coef) 2445 # is larger than the one of the loop coef and none of them is 2446 # zero. This never happens in renormalizable theories but it 2447 # can happen in the HEFT ones or other effective ones. In this 2448 # case the typicaly use of this routine if for the product 2449 # loop_rank_1 x vertex_rank_2 2450 subroutines.append(poly_writer.write_compact_wl_updater(\ 2451 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2452 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2453 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2454 2455 writer.writelines('\n\n'.join(subroutines), 2456 context=self.get_context(matrix_element))
2457
    def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
        """Create the write_mp_compute_loop_coefs.f file."""

        # Nothing to write for an empty matrix element.
        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        # Set lowercase/uppercase Fortran code
        writers.FortranWriter.downcase = False

        replace_dict = copy.copy(matrix_element.rep_dict)

        # Extract helas calls
        squared_orders = matrix_element.get_squared_order_contribs()
        split_orders = matrix_element.get('processes')[0].get('split_orders')

        born_ct_helas_calls , uvct_helas_calls = \
                          fortran_model.get_born_ct_helas_calls(matrix_element,
                     squared_orders=squared_orders, split_orders=split_orders)
        # Promote all the calls to their multiple-precision version.
        self.turn_to_mp_calls(born_ct_helas_calls)
        self.turn_to_mp_calls(uvct_helas_calls)
        coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
                               matrix_element,group_loops=self.group_loops,
                     squared_orders=squared_orders,split_orders=split_orders)
        # The proc_prefix must be replaced
        coef_construction = [c % matrix_element.rep_dict for c
                             in coef_construction]
        self.turn_to_mp_calls(coef_construction)
        self.turn_to_mp_calls(coef_merging)

        file = open(os.path.join(self.template_dir,\
                                 'mp_compute_loop_coefs.inc')).read()

        # Setup the contextual environment which is used in the splitting
        # functions below
        context = self.get_context(matrix_element)
        # Each call category is potentially split over several include files,
        # each with its own required-split-order broadcaster and continue
        # label.
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'mp_helas_calls_split.inc',file,born_ct_helas_calls,\
                        'mp_born_ct_helas_calls','mp_helas_calls_ampb',
                        required_so_broadcaster = 'MP_CT_REQ_SO_DONE',
                        continue_label = 2000,
                        momenta_array_name = 'MP_P',
                        context=context)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'mp_helas_calls_split.inc',file,uvct_helas_calls,\
                        'mp_uvct_helas_calls','mp_helas_calls_uvct',
                        required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE',
                        continue_label = 3000,
                        momenta_array_name = 'MP_P',
                        context=context)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'mp_helas_calls_split.inc',file,coef_construction,\
                        'mp_coef_construction','mp_coef_construction',
                        required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE',
                        continue_label = 4000,
                        momenta_array_name = 'MP_P',
                        context=context)

        replace_dict['mp_coef_merging']='\n'.join(coef_merging)

        file = file % replace_dict

        # Write the file
        writer.writelines(file,context=context)
2523
2524 - def write_color_matrix_data_file(self, writer, col_matrix):
2525 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2526 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2527 2528 res = [] 2529 for line in range(len(col_matrix._col_basis1)): 2530 numerators = [] 2531 denominators = [] 2532 for row in range(len(col_matrix._col_basis2)): 2533 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2534 numerators.append('%6r'%coeff[0].numerator) 2535 denominators.append('%6r'%( 2536 coeff[0].denominator*(-1 if coeff[1] else 1))) 2537 res.append(' '.join(numerators)) 2538 res.append(' '.join(denominators)) 2539 2540 res.append('EOF') 2541 2542 writer.writelines('\n'.join(res))
2543
2544 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2545 color_basis):
2546 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2547 list of the color_amplitudes in the argument of this function.""" 2548 2549 my_cs = color.ColorString() 2550 2551 res = [] 2552 2553 for jamp_number, coeff_list in enumerate(color_amplitudes): 2554 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2555 # Order the ColorString so that its ordering is canonical. 2556 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2557 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2558 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2559 # A line element is a tuple (numerator, denominator, amplitude_id) 2560 line_element = [] 2561 2562 for (coefficient, amp_number) in coeff_list: 2563 coef = self.cat_coeff(\ 2564 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2565 line_element.append((coef[0].numerator, 2566 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2567 # Sort them by growing amplitude number 2568 line_element.sort(key=lambda el:el[2]) 2569 2570 for i in range(3): 2571 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2572 2573 res.append('EOF') 2574 writer.writelines('\n'.join(res))
2575
2576 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2577 """Writes the file compute_color_flows.f which uses the AMPL results 2578 from a common block to project them onto the color flow space so as 2579 to compute the JAMP quantities. For loop induced processes, this file 2580 will also contain a subroutine computing AMPL**2 for madevent 2581 multichanneling.""" 2582 2583 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2584 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2585 2586 dat_writer = open(pjoin('..','MadLoop5_resources', 2587 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2588 %matrix_element.rep_dict),'w') 2589 self.write_color_flow_coefs_data_file(dat_writer, 2590 loop_col_amps, matrix_element.get('loop_color_basis')) 2591 dat_writer.close() 2592 2593 dat_writer = open(pjoin('..','MadLoop5_resources', 2594 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2595 %matrix_element.rep_dict),'w') 2596 self.write_color_matrix_data_file(dat_writer, 2597 matrix_element.get('color_matrix')) 2598 dat_writer.close() 2599 2600 if matrix_element.get('processes')[0].get('has_born'): 2601 born_col_amps = matrix_element.get_born_color_amplitudes() 2602 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2603 dat_writer = open(pjoin('..','MadLoop5_resources', 2604 '%(proc_prefix)sBornColorFlowCoefs.dat' 2605 %matrix_element.rep_dict),'w') 2606 self.write_color_flow_coefs_data_file(dat_writer, 2607 born_col_amps, matrix_element.get('born_color_basis')) 2608 dat_writer.close() 2609 2610 dat_writer = open(pjoin('..','MadLoop5_resources', 2611 '%(proc_prefix)sBornColorFlowMatrix.dat' 2612 %matrix_element.rep_dict),'w') 2613 self.write_color_matrix_data_file(dat_writer, 2614 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2615 dat_writer.close() 2616 else: 2617 matrix_element.rep_dict['nBornFlows'] = 0 2618 2619 replace_dict = copy.copy(matrix_element.rep_dict) 2620 2621 # The following variables only have to be defined for the LoopInduced 2622 # output for madevent. 
2623 if self.get_context(matrix_element)['MadEventOutput']: 2624 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2625 else: 2626 replace_dict['config_map_definition'] = '' 2627 replace_dict['config_index_map_definition'] = '' 2628 replace_dict['nmultichannels'] = 0 2629 replace_dict['nmultichannel_configs'] = 0 2630 2631 # The nmultichannels entry will be used in the matrix<i> wrappers as 2632 # well, so we add it to the general_replace_dict too. 2633 matrix_element.rep_dict['nmultichannels'] = \ 2634 replace_dict['nmultichannels'] 2635 matrix_element.rep_dict['nmultichannel_configs'] = \ 2636 replace_dict['nmultichannel_configs'] 2637 2638 2639 file = open(os.path.join(self.template_dir,\ 2640 'compute_color_flows.inc')).read()%replace_dict 2641 2642 writer.writelines(file,context=self.get_context(matrix_element))
2643
2644 - def write_global_specs(self, matrix_element_list, output_path=None):
2645 """ From the list of matrix element, or the single matrix element, derive 2646 the global quantities to write in global_coef_specs.inc""" 2647 2648 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2649 loop_helas_objects.LoopHelasProcess)): 2650 matrix_element_list = matrix_element_list.get_matrix_elements() 2651 2652 if isinstance(matrix_element_list, list): 2653 me_list = matrix_element_list 2654 else: 2655 me_list = [matrix_element_list] 2656 2657 if output_path is None: 2658 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2659 else: 2660 out_path = output_path 2661 2662 open(out_path,'w').write( 2663 """ integer MAXNEXTERNAL 2664 parameter(MAXNEXTERNAL=%d) 2665 integer OVERALLMAXRANK 2666 parameter(OVERALLMAXRANK=%d) 2667 integer NPROCS 2668 parameter(NPROCS=%d)"""%( 2669 max(me.get_nexternal_ninitial()[0] for me in me_list), 2670 max(me.get_max_loop_rank() for me in me_list), 2671 len(me_list)))
2672 2673
2674 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2675 """ If processes with different maximum loop wavefunction size or 2676 different maximum loop vertex rank have to be output together, then 2677 the file 'coef.inc' in the HELAS Source folder must contain the overall 2678 maximum of these quantities. It is not safe though, and the user has 2679 been appropriatly warned at the output stage """ 2680 2681 # Remove the existing link 2682 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\ 2683 'coef_specs.inc') 2684 os.remove(coef_specs_path) 2685 2686 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16} 2687 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin] 2688 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank( 2689 overall_max_loop_vert_rank) 2690 # Replace it by the appropriate value 2691 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2692 IncWriter.writelines("""INTEGER MAXLWFSIZE 2693 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2694 INTEGER VERTEXMAXCOEFS 2695 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2696 %{'max_lwf_size':overall_max_lwf_size, 2697 'vertex_max_coefs':overall_max_loop_vert_coefs}) 2698 IncWriter.close()
2699
    def setup_check_sa_replacement_dictionary(self, matrix_element, \
                                  split_orders,squared_orders,amps_orders):
        """ Sets up the replacement dictionary for the writeout of the steering
        file check_sa.f

        Populates matrix_element.rep_dict with the Fortran statements that
        print the loop and Born split-order contributions to screen
        ('print_so_[loop|born]_results') and write them to the result file
        on unit 69 ('write_so_[loop|born]_results'), plus the optional
        'set_coupling_target' call."""
        # --- Loop contributions ---
        if len(squared_orders)<1:
            matrix_element.rep_dict['print_so_loop_results']=\
                                   "write(*,*) 'No split orders defined.'"
        elif len(squared_orders)==1:
            # A single contribution: no squared-order target needs selecting.
            matrix_element.rep_dict['set_coupling_target']=''
            matrix_element.rep_dict['print_so_loop_results']=\
              "write(*,*) 'All loop contributions are of split orders (%s)'"%(
                ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \
                                         for i in range(len(split_orders))]))
        else:
            matrix_element.rep_dict['set_coupling_target']='\n'.join([
              '# Here we leave the default target squared split order to -1, meaning that we'+
              ' aim at computing all individual contributions. You can choose otherwise.',
              'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict])
            # One print block per contributing squared split-order
            # combination, reporting accuracy and the finite, single and
            # double pole parts of the loop matrix element.
            matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([
              '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join(
                ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))),
                "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1),
                "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1),
                "ELSE",
                "write(*,*) ' > accuracy = NA'",
                "ENDIF",
                "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1),
                "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1),
                "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1)
                ]) for j, so in enumerate(squared_orders)])
        # Statements writing the same loop results to unit 69, used by the
        # automated tests to parse the outcome of check_sa.
        matrix_element.rep_dict['write_so_loop_results'] = '\n'.join(
          ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+
          ['\n'.join([
            "write (69,*) 'Loop_SO_Results %s'"%(' '.join(
                                       ['%d'%so_value for so_value in so])),
            "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1),
            "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1),
            "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1),
            "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1),
            ]) for j, so in enumerate(squared_orders)])

        # We must reconstruct here the born squared orders.
        squared_born_so_orders = []
        for i, amp_order in enumerate(amps_orders['born_amp_orders']):
            for j in range(0,i+1):
                # Squared orders are the element-wise sum of two amplitude
                # split-order vectors.
                key = tuple([ord1 + ord2 for ord1,ord2 in \
                       zip(amp_order[0],amps_orders['born_amp_orders'][j][0])])
                if not key in squared_born_so_orders:
                    squared_born_so_orders.append(key)
        if len(squared_born_so_orders)<1:
            matrix_element.rep_dict['print_so_born_results'] = ''
        elif len(squared_born_so_orders)==1:
            matrix_element.rep_dict['print_so_born_results'] = \
              "write(*,*) 'All Born contributions are of split orders (%s)'"%(
               ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i])
                                         for i in range(len(split_orders))]))
        else:
            matrix_element.rep_dict['print_so_born_results'] = '\n'.join([
              "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join(
              ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1)
                             for j, so in enumerate(squared_born_so_orders)])
        # Statements writing the Born results to unit 69 as well.
        matrix_element.rep_dict['write_so_born_results'] = '\n'.join(
          ['\n'.join([
            "write (69,*) 'Born_SO_Results %s'"%(' '.join(
                                       ['%d'%so_value for so_value in so])),
            "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1),
            ]) for j, so in enumerate(squared_born_so_orders)])

        # Add a bottom bar to both print_so_[loop|born]_results
        matrix_element.rep_dict['print_so_born_results'] += \
                          '\nwrite (*,*) "---------------------------------"'
        matrix_element.rep_dict['print_so_loop_results'] += \
                          '\nwrite (*,*) "---------------------------------"'
2773
2774 - def write_tir_cache_size_include(self, writer):
2775 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 2776 cache the the user wishes to employ and the default value for it. 2777 This can have an impact on MadLoop speed when using stability checks 2778 but also impacts in a non-negligible way MadLoop's memory footprint. 2779 It is therefore important that the user can chose its size.""" 2780 2781 # For the standalone optimized output, a size of one is necessary. 2782 # The MadLoop+MadEvent output sets it to 2 because it can gain further 2783 # speed increase with a TIR cache of size 2 due to the structure of the 2784 # calls to MadLoop there. 2785 tir_cach_size = "parameter(TIR_CACHE_SIZE=1)" 2786 writer.writelines(tir_cach_size)
2787
    def write_loopmatrix(self, writer, matrix_element, fortran_model, \
                         write_auxiliary_files=True,):
        """Create the loop_matrix.f file.

        Returns the number of relevant helas calls generated; when 'writer'
        is None, returns a (number_of_calls, file_content) tuple instead so
        the caller can write the file itself."""

        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        # Set lowercase/uppercase Fortran code
        writers.FortranWriter.downcase = False

        # Starting off with the treatment of the split_orders since some
        # of the information extracted there will come into the
        # general_replace_dict. Split orders are abbreviated SO in all the
        # keys of the replacement dictionaries.

        # Take care of the split_orders
        squared_orders, amps_orders = matrix_element.get_split_orders_mapping()
        # Creating here a temporary list containing only the information of
        # what are the different squared split orders contributing
        # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number)
        sqso_contribs = [sqso[0] for sqso in squared_orders]
        split_orders = matrix_element.get('processes')[0].get('split_orders')
        # The entries set in the function below are only for check_sa written
        # out in write_loop__matrix_element_v4 (it is however placed here because the
        # split order information is only available here).
        self.setup_check_sa_replacement_dictionary(matrix_element,
                                      split_orders,sqso_contribs,amps_orders)

        # Now recast the split order basis for the loop, born and counterterm
        # amplitude into one single splitorderbasis.
        overall_so_basis = list(set(
            [born_so[0] for born_so in amps_orders['born_amp_orders']]+
            [born_so[0] for born_so in amps_orders['loop_amp_orders']]))
        # We must re-sort it to make sure it follows an increasing WEIGHT order
        order_hierarchy = matrix_element.get('processes')[0]\
                                      .get('model').get('order_hierarchy')
        # Only possible when every split order has a defined hierarchy weight.
        if set(order_hierarchy.keys()).union(set(split_orders))==\
                                                 set(order_hierarchy.keys()):
            overall_so_basis.sort(key= lambda so:
                      sum([order_hierarchy[split_orders[i]]*order_power for \
                                        i, order_power in enumerate(so)]))

        # Those are additional entries used throughout the different files of
        # MadLoop5
        matrix_element.rep_dict['split_order_str_list'] = str(split_orders)
        matrix_element.rep_dict['nSO'] = len(split_orders)
        matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs)
        matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis)

        writers.FortranWriter('nsquaredSO.inc').writelines(
"""INTEGER NSQUAREDSO
PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO'])

        replace_dict = copy.copy(matrix_element.rep_dict)
        # Build the general array mapping the split orders indices to their
        # definition
        replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\
                                          overall_so_basis,'AMPSPLITORDERS'))
        # NOTE(review): 'SQPLITORDERS' (sic) must match the array name used
        # in the Fortran templates; confirm it is intentional before renaming.
        replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\
                                             sqso_contribs,'SQPLITORDERS'))

        # Specify what are the squared split orders selected by the proc def.
        replace_dict['chosen_so_configs'] = self.set_chosen_SO_index(
                           matrix_element.get('processes')[0],sqso_contribs)

        # Now we build the different arrays storing the split_orders ID of each
        # amp.
        ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders'])
        for SO in amps_orders['loop_amp_orders']:
            for amp_number in SO[1]:
                # Amplitude numbers are 1-based; store the 1-based index of
                # the corresponding entry in the overall split-order basis.
                ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1

        replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list(
                                                ampSO_list,'LOOPAMPORDERS'))
        ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders'])
        for SO in amps_orders['born_amp_orders']:
            for amp_number in SO[1]:
                ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1
        replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list(
                                                ampSO_list,'BORNAMPORDERS'))

        # We then go to the TIR setup
        # The first entry is the CutTools, we make sure it is available
        looplibs_av=['.TRUE.']
        # one should be careful about the order in the following as it must match
        # the ordering in MadLoopParamsCard.
        for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']:
            looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \
                           self.tir_available_dict[tir_lib] else '.FALSE.')
        replace_dict['data_looplibs_av']=','.join(looplibs_av)

        # Helicity offset convention
        # For a given helicity, the attached integer 'i' means
        # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign,
        #                             to helicity number abs(i+HELOFFSET)
        # 'i' == -HELOFFSET -> Helicity is analytically zero
        # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'.
        #                            If it is zero, it is skipped.
        # Typically, the hel_offset is 10000
        replace_dict['hel_offset'] = 10000

        # Extract overall denominator
        # Averaging initial state color, spin, and identical FS particles
        den_factor_line = self.get_den_factor_line(matrix_element)
        replace_dict['den_factor_line'] = den_factor_line

        # When the user asks for the polarized matrix element we must
        # multiply back by the helicity averaging factor
        replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
        replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
                                  matrix_element.get_beams_hel_avg_factor()

        if write_auxiliary_files:
            # Write out the color matrix
            (CMNum,CMDenom) = self.get_color_matrix(matrix_element)
            CMWriter=open(pjoin('..','MadLoop5_resources',
              '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w')
            for ColorLine in CMNum:
                CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
            CMWriter.close()
            CMWriter=open(pjoin('..','MadLoop5_resources',
            '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w')
            for ColorLine in CMDenom:
                CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
            CMWriter.close()

            # Write out the helicity configurations
            HelConfigs=matrix_element.get_helicity_matrix()
            HelConfigWriter=open(pjoin('..','MadLoop5_resources',
                   '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w')
            for HelConfig in HelConfigs:
                HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n')
            HelConfigWriter.close()

        # Extract helas calls
        born_ct_helas_calls, uvct_helas_calls = \
                          fortran_model.get_born_ct_helas_calls(matrix_element,
                        squared_orders=squared_orders,split_orders=split_orders)
        coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
                               matrix_element,group_loops=self.group_loops,
                       squared_orders=squared_orders,split_orders=split_orders)

        loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\
                       group_loops=self.group_loops,
                       squared_orders=squared_orders, split_orders=split_orders)
        # The proc_prefix must be replaced
        coef_construction = [c % matrix_element.rep_dict for c
                                                        in coef_construction]
        loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls]

        file = open(os.path.join(self.template_dir,\
                                      'loop_matrix_standalone.inc')).read()

        # Setup the contextual environment which is used in the splitting
        # functions below
        context = self.get_context(matrix_element)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'helas_calls_split.inc',file,born_ct_helas_calls,\
                        'born_ct_helas_calls','helas_calls_ampb',
                        required_so_broadcaster = 'CT_REQ_SO_DONE',
                        continue_label = 2000, context = context)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'helas_calls_split.inc',file,uvct_helas_calls,\
                        'uvct_helas_calls','helas_calls_uvct',
                        required_so_broadcaster = 'UVCT_REQ_SO_DONE',
                        continue_label = 3000, context=context)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'helas_calls_split.inc',file,coef_construction,\
                        'coef_construction','coef_construction',
                        required_so_broadcaster = 'LOOP_REQ_SO_DONE',
                        continue_label = 4000, context=context)
        file=self.split_HELASCALLS(writer,replace_dict,\
                        'helas_calls_split.inc',file,loop_CT_calls,\
                        'loop_CT_calls','loop_CT_calls',
                        required_so_broadcaster = 'CTCALL_REQ_SO_DONE',
                        continue_label = 5000, context=context)

        # Add the entries above to the general_replace_dict so that it can be
        # used by write_mp_compute_loop_coefs later
        matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls']
        matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls']
        matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls']
        matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction']

        replace_dict['coef_merging']='\n'.join(coef_merging)

        file = file % replace_dict
        # NOTE(review): relies on Python 2 'filter' returning a list (len()
        # of an iterator fails on Python 3). Counts the calls that do NOT
        # start with 'CALL LOOP' — presumably the genuine helas calls; TODO
        # confirm the intended filter direction.
        number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \
                                                                loop_CT_calls))
        if writer:
            # Write the file
            writer.writelines(file,context=context)
            return number_of_calls
        else:
            # Return it to be written along with the others
            return number_of_calls, file
2985 2986 #=============================================================================== 2987 # LoopProcessExporterFortranSA 2988 #===============================================================================
class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA,
                                  export_v4.ProcessExporterFortranMatchBox):
    """Class to take care of exporting a set of loop matrix elements in the
    Fortran format."""

    # Default exporter options for the MadLoop MatchBox output; note that
    # 'sa_symmetry' is enabled, unlike for the plain standalone exporter.
    default_opt = {'clean': False, 'complex_mass':False,
                   'export_format':'madloop_matchbox', 'mp':True,
                   'loop_dir':'', 'cuttools_dir':'',
                   'fortran_compiler':'gfortran',
                   'output_dependencies':'external',
                   'sa_symmetry':True}



    def get_color_string_lines(self, matrix_element):
        """Return the color matrix definition lines for this matrix element. Split
        rows in chunks of size n."""

        # NOTE(review): 'self' is not forwarded here, unlike get_JAMP_lines
        # below; this only works if the parent implements this routine as a
        # staticmethod — confirm against export_v4.
        return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)


    def get_JAMP_lines(self, *args, **opts):
        """Adding leading color part of the colorflow"""

        # Delegate explicitly to the MatchBox implementation to bypass the
        # method resolution order of this multiple-inheritance class.
        return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)

    def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
        """ To not mix notations between borns and virtuals we call it here also MG5 """
        return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
3018 3019 3020 #=============================================================================== 3021 # LoopInducedExporter 3022 #===============================================================================
3023 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
3024 """ A class to specify all the functions common to LoopInducedExporterMEGroup 3025 and LoopInducedExporterMENoGroup (but not relevant for the original 3026 Madevent exporters)""" 3027 3028 madloop_makefile_name = 'makefile_MadLoop' 3029 3030
    def __init__(self, *args, **opts):
        """ Initialize the process, setting the proc characteristics."""
        super(LoopInducedExporterME, self).__init__(*args, **opts)
        # Mark the output as loop-induced so downstream tools (banner,
        # run-card consistency machinery) can adapt their behavior.
        self.proc_characteristic['loop_induced'] = True
3035
3036 - def get_context(self,*args,**opts):
3037 """ Make sure that the contextual variable MadEventOutput is set to 3038 True for this exporter""" 3039 3040 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 3041 context['MadEventOutput'] = True 3042 return context
3043 3044
3045 - def get_source_libraries_list(self):
3046 """ Returns the list of libraries to be compiling when compiling the 3047 SOURCE directory. It is different for loop_induced processes and 3048 also depends on the value of the 'output_dependencies' option""" 3049 3050 libraries_list = super(LoopInducedExporterME,self).\ 3051 get_source_libraries_list() 3052 3053 if self.dependencies=='internal': 3054 libraries_list.append('$(LIBDIR)libcts.$(libext)') 3055 libraries_list.append('$(LIBDIR)libiregi.$(libext)') 3056 3057 return libraries_list
3058 3065
    def copy_template(self, *args, **opts):
        """Pick the right mother functions
        """
        # Call specifically the necessary building functions for the mixed
        # template setup for both MadEvent and MadLoop standalone.
        # The Source makefile is not copied here since MadEvent provides
        # its own version of it.
        LoopProcessExporterFortranSA.loop_additional_template_setup(self,
                                                  copy_Source_makefile=False)

        LoopProcessOptimizedExporterFortranSA.\
                                 loop_optimized_additional_template_setup(self)
3076 3077 3078 #=========================================================================== 3079 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 3080 #===========================================================================
    def finalize(self, matrix_elements, history, mg5options, flaglist):
        """Function to finalize v4 directory, for inheritance.
        """

        # Flag the output as loop-induced for the process characteristics
        # written out at the end of the export.
        self.proc_characteristic['loop_induced'] = True

        # This can be uncommented if one desires to have the MadLoop
        # initialization performed at the end of the output phase.
        # Alternatively, one can simply execute the command 'initMadLoop' in
        # the madevent interactive interface after the output.
        # from madgraph.interface.madevent_interface import MadLoopInitializer
        # MadLoopInitializer.init_MadLoop(self.dir_path,
        #                  subproc_prefix=self.SubProc_prefix, MG_options=None)

        # Write the overall maxima (external legs, loop rank, number of
        # processes) shared by all subprocesses.
        self.write_global_specs(matrix_elements)
3096
3097 - def write_tir_cache_size_include(self, writer):
3098 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 3099 cache the the user wishes to employ and the default value for it. 3100 This can have an impact on MadLoop speed when using stability checks 3101 but also impacts in a non-negligible way MadLoop's memory footprint. 3102 It is therefore important that the user can chose its size.""" 3103 3104 # In this case of MadLoop+MadEvent output, we set it to 2 because we 3105 # gain further speed increase with a TIR cache of size 2 due to the 3106 # the fact that we call MadLoop once per helicity configuration in this 3107 # case. 3108 tir_cach_size = "parameter(TIR_CACHE_SIZE=2)" 3109 writer.writelines(tir_cach_size)
3110
    def write_matrix_element_v4(self, writer, matrix_element, fortran_model,
                     proc_id = None, config_map = [], subproc_number = None):
        """ Write it the wrapper to call the ML5 subroutine in the library.

        Returns (0, ncolor): no helas calls are generated by this wrapper.
        NOTE(review): 'config_map' has a mutable default and is never read
        in this body; it appears to be kept only for signature compatibility
        with the other exporters — confirm before removing."""

        # Generating the MadEvent wrapping ME's routines
        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        if not isinstance(writer, writers.FortranWriter):
            raise writers.FortranWriter.FortranWriterError(\
                "writer not FortranWriter")

        replace_dict = copy.copy(matrix_element.rep_dict)

        # Extract version number and date from VERSION file
        info_lines = self.get_mg5_info_lines()
        replace_dict['info_lines'] = info_lines

        # Extract process info lines
        process_lines = self.get_process_info_lines(matrix_element)
        replace_dict['process_lines'] = process_lines

        # Set proc_id
        # It can be set to None when write_matrix_element_v4 is called without
        # grouping. In this case the subroutine SMATRIX should take an empty
        # suffix.
        if proc_id is None:
            replace_dict['proc_id'] = ''
        else:
            replace_dict['proc_id'] = proc_id

        #set the average over the number of initial helicities
        replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
        replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
                                  matrix_element.get_beams_hel_avg_factor()

        # Extract helicity lines
        helicity_lines = self.get_helicity_lines(matrix_element)
        replace_dict['helicity_lines'] = helicity_lines


        # Extract ndiags
        ndiags = len(matrix_element.get('diagrams'))
        replace_dict['ndiags'] = ndiags

        # Set define_iconfigs_lines
        replace_dict['define_iconfigs_lines'] = \
"""INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG
COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG"""

        if proc_id:
            # Set lines for subprocess group version
            # Set define_iconfigs_lines
            replace_dict['define_iconfigs_lines'] += \
"""\nINTEGER SUBDIAG(MAXSPROC),IB(2)
COMMON/TO_SUB_DIAG/SUBDIAG,IB"""
            # Set set_amp2_line
            replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id
        else:
            # Standard running
            # Set set_amp2_line
            replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)"

        # If group_numer
        replace_dict['ml_prefix'] = \
                self.get_ME_identifier(matrix_element, subproc_number, proc_id)

        # Extract ncolor
        ncolor = max(1, len(matrix_element.get('color_basis')))
        replace_dict['ncolor'] = ncolor

        n_tot_diags = len(matrix_element.get_loop_diagrams())
        replace_dict['n_tot_diags'] = n_tot_diags

        file = open(pjoin(_file_path, \
                        'iolibs/template_files/%s' % self.matrix_file)).read()
        file = file % replace_dict

        # Write the file
        writer.writelines(file)

        return 0, ncolor
3194
3195 - def get_amp2_lines(self, *args, **opts):
3196 """Make sure the function is implemented in the daughters""" 3197 3198 raise NotImplemented, 'The function get_amp2_lines must be called in '+\ 3199 ' the daugthers of LoopInducedExporterME'
3200 3201 #=============================================================================== 3202 # LoopInducedExporterMEGroup 3203 #===============================================================================
3204 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3205 export_v4.ProcessExporterFortranMEGroup):
3206 """Class to take care of exporting a set of grouped loop induced matrix 3207 elements""" 3208 3209 matrix_file = "matrix_loop_induced_madevent_group.inc" 3210 3216
    def write_source_makefile(self, *args, **opts):
        """Pick the correct write_source_makefile function from
        ProcessExporterFortranMEGroup"""

        # Explicit parent call so that the multiple-inheritance MRO does not
        # resolve to the MadLoop-standalone implementation instead.
        export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self,
                                                                *args, **opts)
3223
    def copy_template(self, *args, **opts):
        """Pick the right mother functions
        """
        # Call specifically the necessary building functions for the mixed
        # template setup for both MadEvent and MadLoop standalone

        # Start witht the MadEvent one
        export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts)

        # Then the MadLoop-standalone related one
        LoopInducedExporterME.copy_template(self, *args, **opts)
3235
    def finalize(self, *args, **opts):
        """Pick the right mother functions
        """
        # Call specifically what finalize_v4_directory must be used, so that
        # the MRO doesn't interfere.

        # Flag the output as loop-induced for the process characteristics.
        self.proc_characteristic['loop_induced'] = True

        export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts)

        # And the finilize from LoopInducedExporterME which essentially takes
        # care of MadLoop virtuals initialization
        LoopInducedExporterME.finalize(self,*args,**opts)
3249
3250 - def generate_subprocess_directory(self, subproc_group, 3251 fortran_model,group_number):
3252 """Generate the Pn directory for a subprocess group in MadEvent, 3253 including the necessary matrix_N.f files, configs.inc and various 3254 other helper files""" 3255 3256 # Generate the MadLoop files 3257 calls = 0 3258 matrix_elements = subproc_group.get('matrix_elements') 3259 for ime, matrix_element in enumerate(matrix_elements): 3260 self.unique_id +=1 3261 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3262 group_number = group_number, proc_id = str(ime+1), 3263 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3264 config_map = subproc_group.get('diagram_maps')[ime], 3265 unique_id=self.unique_id) 3266 3267 # Then generate the MadEvent files 3268 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3269 self, subproc_group,fortran_model,group_number) 3270 3271 return calls
3272
3273 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3274 """Return the various replacement dictionary inputs necessary for the 3275 multichanneling amp2 definition for the loop-induced MadEvent output. 3276 """ 3277 3278 if not config_map: 3279 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 3280 ' necessary for the MadEvent Loop-induced output with grouping.' 3281 3282 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3283 3284 ret_lines = [] 3285 # In this case, we need to sum up all amplitudes that have 3286 # identical topologies, as given by the config_map (which 3287 # gives the topology/config for each of the diagrams 3288 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3289 diagrams = matrix_element.get_loop_diagrams() 3290 else: 3291 diagrams = matrix_element.get('diagrams') 3292 3293 # Note that we need to use AMP2 number corresponding to the first 3294 # diagram number used for that AMP2. 3295 # The dictionary below maps the config ID to this corresponding first 3296 # diagram number 3297 config_index_map = {} 3298 # For each diagram number, the dictionary below gives the config_id it 3299 # belongs to or 0 if it doesn't belong to any. 
3300 loop_amp_ID_to_config = {} 3301 3302 # Combine the diagrams with identical topologies 3303 config_to_diag_dict = {} 3304 for idiag, diag in enumerate(diagrams): 3305 try: 3306 config_to_diag_dict[config_map[idiag]].append(idiag) 3307 except KeyError: 3308 config_to_diag_dict[config_map[idiag]] = [idiag] 3309 3310 for config in sorted(config_to_diag_dict.keys()): 3311 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3312 3313 # First add the UV and R2 counterterm amplitudes of each selected 3314 # diagram for the multichannel config 3315 CT_amp_numbers = [a.get('number') for a in \ 3316 sum([diagrams[idiag].get_ct_amplitudes() for \ 3317 idiag in config_to_diag_dict[config]], [])] 3318 3319 for CT_amp_number in CT_amp_numbers: 3320 loop_amp_ID_to_config[CT_amp_number] = config 3321 3322 # Now add here the loop amplitudes. 3323 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3324 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3325 idiag in config_to_diag_dict[config]], [])] 3326 3327 for loop_amp_number in loop_amp_numbers: 3328 loop_amp_ID_to_config[loop_amp_number] = config 3329 3330 # Notice that the config_id's are not necessarily sequential here, so 3331 # the size of the config_index_map array has to be the maximum over all 3332 # config_ids. 3333 # config_index_map should never be empty unless there was no diagram, 3334 # so the expression below is ok. 3335 n_configs = max(config_index_map.keys()) 3336 replace_dict['nmultichannel_configs'] = n_configs 3337 3338 # We must fill the empty entries of the map with the dummy amplitude 3339 # number 0. 3340 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3341 for i in range(1,n_configs+1)] 3342 # Now the placeholder 'nmultichannels' refers to the number of 3343 # multi-channels which are contributing, so we must filter out zeros. 
3344 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3345 3346 # Now write the amp2 related inputs in the replacement dictionary 3347 res_list = [] 3348 chunk_size = 6 3349 for k in xrange(0, len(conf_list), chunk_size): 3350 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3351 (k + 1, min(k + chunk_size, len(conf_list)), 3352 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3353 3354 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3355 3356 res_list = [] 3357 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3358 amp_list = [loop_amp_ID_to_config[i] for i in \ 3359 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3360 chunk_size = 6 3361 for k in xrange(0, len(amp_list), chunk_size): 3362 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3363 (k + 1, min(k + chunk_size, len(amp_list)), 3364 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3365 3366 replace_dict['config_map_definition'] = '\n'.join(res_list) 3367 3368 return
3369 3370 #=============================================================================== 3371 # LoopInducedExporterMENoGroup 3372 #===============================================================================
3373 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3374 export_v4.ProcessExporterFortranME):
3375 """Class to take care of exporting a set of individual loop induced matrix 3376 elements""" 3377 3378 matrix_file = "matrix_loop_induced_madevent.inc" 3379 3385
3386 - def write_source_makefile(self, *args, **opts):
3387 """Pick the correct write_source_makefile function from 3388 ProcessExporterFortran""" 3389 3390 super(export_v4.ProcessExporterFortranME,self).\ 3391 write_source_makefile(*args, **opts)
3392
3393 - def copy_template(self, *args, **opts):
3394 """Pick the right mother functions 3395 """ 3396 # Call specifically the necessary building functions for the mixed 3397 # template setup for both MadEvent and MadLoop standalone 3398 3399 # Start witht the MadEvent one 3400 export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts) 3401 3402 # Then the MadLoop-standalone related one 3403 LoopInducedExporterME.copy_template(self, *args, **opts)
3404
3405 - def finalize(self, *args, **opts):
3406 """Pick the right mother functions 3407 """ 3408 3409 self.proc_characteristic['loop_induced'] = True 3410 # Call specifically what finalize must be used, so that the 3411 # MRO doesn't interfere. 3412 export_v4.ProcessExporterFortranME.finalize(self, *args, **opts) 3413 3414 # And the finilize_v4 from LoopInducedExporterME which essentially takes 3415 # care of MadLoop virtuals initialization 3416 LoopInducedExporterME.finalize(self, *args, **opts)
3417
3418 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
3419 """Generate the Pn directory for a subprocess group in MadEvent, 3420 including the necessary matrix_N.f files, configs.inc and various 3421 other helper files""" 3422 3423 self.unique_id += 1 3424 # Then generate the MadLoop files 3425 calls = self.generate_loop_subprocess(matrix_element,fortran_model, 3426 group_number = me_number, 3427 unique_id=self.unique_id) 3428 3429 3430 # First generate the MadEvent files 3431 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory( 3432 self, matrix_element, fortran_model, me_number) 3433 return calls
3434
3435 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3436 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3437 3438 if config_map: 3439 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3440 ' for the Loop induced exporter without grouping.' 3441 3442 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3443 # Get minimum legs in a vertex 3444 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3445 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3446 minvert = min(vert_list) if vert_list!=[] else 0 3447 3448 # Note that we need to use AMP2 number corresponding to the first 3449 # diagram number used for that AMP2. 3450 # The dictionary below maps the config ID to this corresponding first 3451 # diagram number 3452 config_index_map = {} 3453 # For each diagram number, the dictionary below gives the config_id it 3454 # belongs to or 0 if it doesn't belong to any. 3455 loop_amp_ID_to_config = {} 3456 3457 n_configs = 0 3458 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3459 # Ignore any diagrams with 4-particle vertices. 
3460 use_for_multichanneling = True 3461 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3462 use_for_multichanneling = False 3463 curr_config = 0 3464 else: 3465 n_configs += 1 3466 curr_config = n_configs 3467 3468 if not use_for_multichanneling: 3469 if 0 not in config_index_map: 3470 config_index_map[0] = idiag + 1 3471 else: 3472 config_index_map[curr_config] = idiag + 1 3473 3474 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3475 for CT_amp in CT_amps: 3476 loop_amp_ID_to_config[CT_amp] = curr_config 3477 3478 Loop_amps = [a.get('amplitudes')[0].get('number') 3479 for a in diag.get_loop_amplitudes()] 3480 for Loop_amp in Loop_amps: 3481 loop_amp_ID_to_config[Loop_amp] = curr_config 3482 3483 # Now write the amp2 related inputs in the replacement dictionary 3484 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3485 replace_dict['nmultichannel_configs'] = n_configs 3486 # Now the placeholder 'nmultichannels' refers to the number of 3487 # multi-channels which are contributing which, in the non-grouped case 3488 # is always equal to the total number of multi-channels. 
3489 replace_dict['nmultichannels'] = n_configs 3490 3491 res_list = [] 3492 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3493 if i!=0] 3494 chunk_size = 6 3495 for k in xrange(0, len(conf_list), chunk_size): 3496 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3497 (k + 1, min(k + chunk_size, len(conf_list)), 3498 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3499 3500 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3501 3502 res_list = [] 3503 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3504 amp_list = [loop_amp_ID_to_config[i] for i in \ 3505 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3506 chunk_size = 6 3507 for k in xrange(0, len(amp_list), chunk_size): 3508 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3509 (k + 1, min(k + chunk_size, len(amp_list)), 3510 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3511 3512 replace_dict['config_map_definition'] = '\n'.join(res_list)
3513