
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.iolibs.drawing_eps as draw 
  38  import madgraph.iolibs.files as files 
  39  import madgraph.iolibs.group_subprocs as group_subprocs 
  40  import madgraph.various.misc as misc 
  41  import madgraph.various.q_polynomial as q_polynomial 
  42  import madgraph.iolibs.file_writers as writers 
  43  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  44  import madgraph.iolibs.template_files as template_files 
  45  import madgraph.iolibs.ufo_expression_parsers as parsers 
  46  import madgraph.iolibs.export_v4 as export_v4 
  47  import madgraph.various.diagram_symmetry as diagram_symmetry 
  48  import madgraph.various.process_checks as process_checks 
  49  import madgraph.various.progressbar as pbar 
  50  import madgraph.various.q_polynomial as q_polynomial 
  51  import madgraph.core.color_amp as color_amp 
  52  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  53  import models.check_param_card as check_param_card 
  54  from madgraph.loop.loop_base_objects import LoopDiagram 
  55  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  56   
  57  import madgraph.various.banner as banner_mod 
  58   
  59  pjoin = os.path.join 
  60   
  61  import aloha.create_aloha as create_aloha 
  62  import models.write_param_card as param_writer 
  63  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  64  from madgraph.iolibs.files import cp, ln, mv 
  65  pjoin = os.path.join 
  66  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  67  logger = logging.getLogger('madgraph.loop_exporter') 
  68   
  69  #=============================================================================== 
  70  # LoopExporterFortran 
  71  #=============================================================================== 
72 -class LoopExporterFortran(object):
73 """ Class to define general helper functions to the different 74 loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both 75 from this class AND from the corresponding ProcessExporterFortran(ME,SA,...). 76 It plays the same role as ProcessExporterFrotran and simply defines here 77 loop-specific helpers functions necessary for all loop exporters. 78 Notice that we do not have LoopExporterFortran inheriting from 79 ProcessExporterFortran but give access to arguments like dir_path and 80 clean using options. This avoids method resolution object ambiguity""" 81 82 default_opt = {'clean': False, 'complex_mass':False, 83 'export_format':'madloop', 'mp':True, 84 'loop_dir':'', 'cuttools_dir':'', 85 'fortran_compiler':'gfortran', 86 'SubProc_prefix': 'P', 87 'output_dependencies': 'external', 88 'compute_color_flows': False, 89 'mode':''} 90 91 include_names = {'ninja' : 'mninja.mod', 92 'golem' : 'generic_function_1p.mod', 93 'samurai':'msamurai.mod'} 94
95 - def __init__(self, mgme_dir="", dir_path = "", opt=None):
96 """Initiate the LoopExporterFortran with directory information on where 97 to find all the loop-related source files, like CutTools""" 98 99 100 self.opt = dict(self.default_opt) 101 if opt: 102 self.opt.update(opt) 103 104 self.SubProc_prefix = self.opt['SubProc_prefix'] 105 self.loop_dir = self.opt['loop_dir'] 106 self.cuttools_dir = self.opt['cuttools_dir'] 107 self.fortran_compiler = self.opt['fortran_compiler'] 108 self.dependencies = self.opt['output_dependencies'] 109 self.compute_color_flows = self.opt['compute_color_flows'] 110 111 super(LoopExporterFortran,self).__init__(mgme_dir, dir_path, self.opt)
112 113 175
176 - def get_aloha_model(self, model):
177 """ Caches the aloha model created here as an attribute of the loop 178 exporter so that it can later be used in the LoopHelasMatrixElement 179 in the function compute_all_analytic_information for recycling aloha 180 computations across different LoopHelasMatrixElements steered by the 181 same loop exporter. 182 """ 183 if not hasattr(self, 'aloha_model'): 184 self.aloha_model = create_aloha.AbstractALOHAModel(model.get('name')) 185 return self.aloha_model
 186   
 187      #=========================================================================== 
 188      # write the multiple-precision header files 
 189      #=========================================================================== 
190 - def write_mp_files(self, writer_mprec, writer_mpc):
191 """Write the cts_mprec.h and cts_mpc.h""" 192 193 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 194 writer_mprec.writelines(file) 195 196 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 197 file = file.replace('&','') 198 writer_mpc.writelines(file) 199 200 return True
201 202 #=============================================================================== 203 # LoopProcessExporterFortranSA 204 #===============================================================================
 205  -class LoopProcessExporterFortranSA(LoopExporterFortran, 
 206                                      export_v4.ProcessExporterFortranSA): 
 207   
 208      """Class to take care of exporting a set of loop matrix elements in the 
 209      Fortran format.""" 
 210   
 211      template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 
 212      madloop_makefile_name = 'makefile' 
 213   
 214      MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 
 215                        style='classic2', color='green', 
 216                        top_frame_char = '=', bottom_frame_char = '=', 
 217                        left_frame_char = '{',right_frame_char = '}', 
 218                        print_frame=True, side_margin = 7, up_margin = 1) 
 219   
220 - def copy_v4template(self, modelname = ''):
221 """Additional actions needed to setup the Template. 222 """ 223 super(LoopProcessExporterFortranSA, self).copy_v4template(modelname) 224 225 self.loop_additional_template_setup()
226
227 - def loop_additional_template_setup(self, copy_Source_makefile = True):
228 """ Perform additional actions specific for this class when setting 229 up the template with the copy_v4template function.""" 230 231 # We must change some files to their version for NLO computations 232 cpfiles= ["Cards/MadLoopParams.dat", 233 "SubProcesses/MadLoopParamReader.f", 234 "SubProcesses/MadLoopParams.inc"] 235 if copy_Source_makefile: 236 cpfiles.append("Source/makefile") 237 238 for file in cpfiles: 239 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 240 os.path.join(self.dir_path, file)) 241 242 # Also put a copy of MadLoopParams.dat into MadLoopParams_default.dat 243 shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), 244 pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat')) 245 246 self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.loop_dir,'StandAlone', 247 'Cards', 'MadLoopParams.dat')) 248 # write the output file 249 self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses", 250 "MadLoopParams.dat")) 251 252 # We might need to give a different name to the MadLoop makefile\ 253 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 254 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 255 256 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 257 # for the non-optimized output 258 link_tir_libs=[] 259 tir_libs=[] 260 261 filePath = pjoin(self.dir_path, 'SubProcesses', 262 'MadLoop_makefile_definitions') 263 calls = self.write_loop_makefile_definitions( 264 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 265 266 267 # We need minimal editing of MadLoopCommons.f 268 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 269 "SubProcesses","MadLoopCommons.inc")).read() 270 writer = writers.FortranWriter(os.path.join(self.dir_path, 271 "SubProcesses","MadLoopCommons.f")) 272 writer.writelines(MadLoopCommon%{ 273 'print_banner_commands':self.MadLoop_banner}) 274 writer.close() 275 276 277 # Copy the whole MadLoop5_resources directory (empty at this stage) 278 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 279 'MadLoop5_resources')): 280 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 281 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 282 283 # Link relevant cards from Cards inside the MadLoop5_resources 284 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 285 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 286 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 287 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 288 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 289 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 290 291 # And remove check_sa in the SubProcess folder since now there is a 292 # check_sa tailored to each subprocess. 293 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 294 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 295 296 cwd = os.getcwd() 297 dirpath = os.path.join(self.dir_path, 'SubProcesses') 298 try: 299 os.chdir(dirpath) 300 except os.error: 301 logger.error('Could not cd to directory %s' % dirpath) 302 return 0 303 304 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 305 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 306 writers.FortranWriter('cts_mpc.h')) 307 308 # Return to original PWD 309 os.chdir(cwd) 310 311 # We must link the CutTools to the Library folder of the active Template 312 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
313 314 # This function is placed here and not in optimized exporterd, 315 # because the same makefile.inc should be used in all cases.
316 - def write_loop_makefile_definitions(self, writer, link_tir_libs, 317 tir_libs,tir_include=[]):
318 """ Create the file makefile which links to the TIR libraries.""" 319 320 file = open(os.path.join(self.loop_dir,'StandAlone', 321 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 322 replace_dict={} 323 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 324 replace_dict['tir_libs']=' '.join(tir_libs) 325 replace_dict['dotf']='%.f' 326 replace_dict['prefix']= self.SubProc_prefix 327 replace_dict['doto']='%.o' 328 replace_dict['tir_include']=' '.join(tir_include) 329 file=file%replace_dict 330 if writer: 331 writer.writelines(file) 332 else: 333 return file
334
335 - def convert_model_to_mg4(self, model, wanted_lorentz = [], 336 wanted_couplings = []):
337 """ Caches the aloha model created here when writing out the aloha 338 fortran subroutine. 339 """ 340 self.get_aloha_model(model) 341 super(LoopProcessExporterFortranSA, self).convert_model_to_mg4(model, 342 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
343
344 - def get_ME_identifier(self, matrix_element, 345 group_number = None, group_elem_number = None):
346 """ A function returning a string uniquely identifying the matrix 347 element given in argument so that it can be used as a prefix to all 348 MadLoop5 subroutines and common blocks related to it. This allows 349 to compile several processes into one library as requested by the 350 BLHA (Binoth LesHouches Accord) guidelines. 351 The arguments group_number and proc_id are just for the LoopInduced 352 output with MadEvent.""" 353 354 # When disabling the loop grouping in the LoopInduced MadEvent output, 355 # we have only the group_number set and the proc_id set to None. In this 356 # case we don't print the proc_id. 357 if (not group_number is None) and group_elem_number is None: 358 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'), 359 group_number) 360 elif group_number is None or group_elem_number is None: 361 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id') 362 else: 363 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'), 364 group_number, group_elem_number)
365
366 - def get_SubProc_folder_name(self, process, 367 group_number = None, group_elem_number = None):
368 """Returns the name of the SubProcess directory, which can contain 369 the process goup and group element number for the case of loop-induced 370 integration with MadEvent.""" 371 372 # When disabling the loop grouping in the LoopInduced MadEvent output, 373 # we have only the group_number set and the proc_id set to None. In this 374 # case we don't print the proc_id. 375 if not group_number is None and group_elem_number is None: 376 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'), 377 group_number,process.shell_string(print_id=False)) 378 elif group_number is None or group_elem_number is None: 379 return "%s%s" %(self.SubProc_prefix,process.shell_string()) 380 else: 381 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'), 382 group_number, group_elem_number,process.shell_string(print_id=False))
383 384 #=========================================================================== 385 # Set the compiler to be gfortran for the loop processes. 386 #===========================================================================
387 - def compiler_choice(self, compiler=export_v4.default_compiler):
388 """ Different daughter classes might want different compilers. 389 Here, the gfortran compiler is used throughout the compilation 390 (mandatory for CutTools written in f90) """ 391 if isinstance(compiler, str): 392 fortran_compiler = compiler 393 compiler = export_v4.default_compiler 394 compiler['fortran'] = fortran_compiler 395 396 if not compiler['fortran'] is None and not \ 397 any([name in compiler['fortran'] for name in \ 398 ['gfortran','ifort']]): 399 logger.info('For loop processes, the compiler must be fortran90'+\ 400 'compatible, like gfortran.') 401 compiler['fortran'] = 'gfortran' 402 self.set_compiler(compiler,True) 403 else: 404 self.set_compiler(compiler) 405 406 self.set_cpp_compiler(compiler['cpp'])
407
408 - def turn_to_mp_calls(self, helas_calls_list):
 409          # Prepend 'MP_' to all the helas calls in helas_calls_list. 
 410          # It might look like a brutal, unsafe implementation, but it is not, as 
 411          # these calls are built from the properties of the HELAS objects and 
 412          # whether they are evaluated in double or quad precision is none of 
 413          # their business but only relevant to the output algorithm. 
 414          # Also the cast to complex masses DCMPLX(*) must be replaced by 
 415          # CMPLX(*,KIND=16) 
 416          MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE) 
 417   
 418          def replaceWith(match_obj): 
 419              return match_obj.group('toSub')+'MP_' 
 420   
 421          DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 
 422                            re.IGNORECASE | re.MULTILINE) 
 423   
 424          for i, helas_call in enumerate(helas_calls_list): 
 425              new_helas_call=MP.sub(replaceWith,helas_call) 
 426              helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 
 427                                             new_helas_call) 
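The two substitutions simply rewrite the generated Fortran text: every 'CALL X(...)' becomes 'CALL MP_X(...)' and DCMPLX(...) becomes CMPLX(...,KIND=16). A quick illustration on a made-up HELAS call string (the routine name and arguments are invented for the example):

    import re

    # Same two regexes as in turn_to_mp_calls, applied to an invented sample line.
    MP = re.compile(r"(?P<toSub>^.*CALL\s+)", re.IGNORECASE | re.MULTILINE)
    DCMPLX = re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)", re.IGNORECASE | re.MULTILINE)

    call = "CALL VVV1_0(W(1,1),W(1,2),W(1,3),DCMPLX(MDL_MT),AMP(1))"
    call = MP.sub(lambda m: m.group('toSub') + 'MP_', call)
    call = DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)", call)
    print(call)
    # CALL MP_VVV1_0(W(1,1),W(1,2),W(1,3),CMPLX(MDL_MT,KIND=16),AMP(1))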
428 432 440
441 - def make(self):
442 """ Compiles the additional dependences for loop (such as CutTools).""" 443 super(LoopProcessExporterFortranSA, self).make() 444 445 # make CutTools (only necessary with MG option output_dependencies='internal') 446 libdir = os.path.join(self.dir_path,'lib') 447 sourcedir = os.path.join(self.dir_path,'Source') 448 if self.dependencies=='internal': 449 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 450 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 451 if os.path.exists(pjoin(sourcedir,'CutTools')): 452 logger.info('Compiling CutTools (can take a couple of minutes) ...') 453 misc.compile(['CutTools'], cwd = sourcedir) 454 logger.info(' ...done.') 455 else: 456 raise MadGraph5Error('Could not compile CutTools because its'+\ 457 ' source directory could not be found in the SOURCE folder.') 458 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 459 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 460 raise MadGraph5Error('CutTools compilation failed.') 461 462 # Verify compatibility between current compiler and the one which was 463 # used when last compiling CutTools (if specified). 464 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 465 libdir, 'libcts.a')))),'compiler_version.log') 466 if os.path.exists(compiler_log_path): 467 compiler_version_used = open(compiler_log_path,'r').read() 468 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 469 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 470 if os.path.exists(pjoin(sourcedir,'CutTools')): 471 logger.info('CutTools was compiled with a different fortran'+\ 472 ' compiler. Re-compiling it now...') 473 misc.compile(['cleanCT'], cwd = sourcedir) 474 misc.compile(['CutTools'], cwd = sourcedir) 475 logger.info(' ...done.') 476 else: 477 raise MadGraph5Error("CutTools installation in %s"\ 478 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 479 " seems to have been compiled with a different compiler than"+\ 480 " the one specified in MG5_aMC. Please recompile CutTools.")
481
482 - def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
483 """Concatenate the coefficient information to reduce it to 484 (fraction, is_imaginary) """ 485 486 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 487 488 return (total_coeff, is_imaginary)
489
490 - def get_amp_to_jamp_map(self, col_amps, n_amps):
491 """ Returns a list with element 'i' being a list of tuples corresponding 492 to all apparition of amplitude number 'i' in the jamp number 'j' 493 with coeff 'coeff_j'. The format of each tuple describing an apparition 494 is (j, coeff_j). where coeff_j is of the form (Fraction, is_imag).""" 495 496 if(isinstance(col_amps,list)): 497 if(col_amps and isinstance(col_amps[0],list)): 498 color_amplitudes=col_amps 499 else: 500 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 501 else: 502 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 503 504 # To store the result 505 res_list = [[] for i in range(n_amps)] 506 for i, coeff_list in enumerate(color_amplitudes): 507 for (coefficient, amp_number) in coeff_list: 508 res_list[amp_number-1].append((i,self.cat_coeff(\ 509 coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 510 511 return res_list
512
513 - def get_color_matrix(self, matrix_element):
514 """Return the color matrix definition lines. This color matrix is of size 515 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 516 amplitude.""" 517 518 logger.info('Computing diagram color coefficients') 519 520 # The two lists have a list of tuples at element 'i' which correspond 521 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 522 # with coeff 'coeffj'. The format of each tuple describing an apparition 523 # is (j, coeffj). 524 ampl_to_jampl=self.get_amp_to_jamp_map(\ 525 matrix_element.get_loop_color_amplitudes(), 526 matrix_element.get_number_of_loop_amplitudes()) 527 if matrix_element.get('processes')[0].get('has_born'): 528 ampb_to_jampb=self.get_amp_to_jamp_map(\ 529 matrix_element.get_born_color_amplitudes(), 530 matrix_element.get_number_of_born_amplitudes()) 531 else: 532 ampb_to_jampb=ampl_to_jampl 533 # Below is the original color matrix multiplying the JAMPS 534 if matrix_element.get('color_matrix'): 535 ColorMatrixDenom = \ 536 matrix_element.get('color_matrix').get_line_denominators() 537 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 538 get_line_numerators(index, denominator) for 539 (index, denominator) in enumerate(ColorMatrixDenom) ] 540 else: 541 ColorMatrixDenom= [1] 542 ColorMatrixNum = [[1]] 543 544 # Below is the final color matrix output 545 ColorMatrixNumOutput=[] 546 ColorMatrixDenomOutput=[] 547 548 # Now we construct the color factors between each born and loop amplitude 549 # by scanning their contributions to the different jamps. 550 start = time.time() 551 progress_bar = None 552 time_info = False 553 for i, jampl_list in enumerate(ampl_to_jampl): 554 # This can be pretty long for processes with many color flows. 555 # So, if necessary (i.e. for more than 15s), we tell the user the 556 # estimated time for the processing. 557 if i==5: 558 elapsed_time = time.time()-start 559 t = len(ampl_to_jampl)*(elapsed_time/5.0) 560 if t > 10.0: 561 time_info = True 562 logger.info('The color factors computation will take '+\ 563 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 564 'Started on %s.'%datetime.datetime.now().strftime(\ 565 "%d-%m-%Y %H:%M")) 566 if logger.getEffectiveLevel()<logging.WARNING: 567 widgets = ['Color computation:', pbar.Percentage(), ' ', 568 pbar.Bar(),' ', pbar.ETA(), ' '] 569 progress_bar = pbar.ProgressBar(widgets=widgets, 570 maxval=len(ampl_to_jampl), fd=sys.stdout) 571 572 if not progress_bar is None: 573 progress_bar.update(i+1) 574 # Flush to force the printout of the progress_bar to be updated 575 sys.stdout.flush() 576 577 line_num=[] 578 line_denom=[] 579 580 # Treat the special case where this specific amplitude contributes to no 581 # color flow at all. So it is zero because of color but not even due to 582 # an accidental cancellation among color flows, but simply because of its 583 # projection to each individual color flow is zero. In such case, the 584 # corresponding jampl_list is empty and all color coefficients must then 585 # be zero. This happens for example in the Higgs Effective Theory model 586 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
587 if len(jampl_list)==0: 588 line_num=[0]*len(ampb_to_jampb) 589 line_denom=[1]*len(ampb_to_jampb) 590 ColorMatrixNumOutput.append(line_num) 591 ColorMatrixDenomOutput.append(line_denom) 592 continue 593 594 for jampb_list in ampb_to_jampb: 595 real_num=0 596 imag_num=0 597 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 598 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 599 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 600 itertools.product(jampl_list,jampb_list)]) 601 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 602 itertools.product(jampl_list,jampb_list): 603 # take the numerator and multiply by lcm/denominator 604 # as we will later divide by the lcm. 605 buff_num=ampl_coeff[0].numerator*\ 606 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 607 abs(common_denom)/(ampl_coeff[0].denominator*\ 608 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 609 # Remember that we must take the complex conjugate of 610 # the born jamp color coefficient because we will compute 611 # the square with 2 Re(LoopAmp x BornAmp*) 612 if ampl_coeff[1] and ampb_coeff[1]: 613 real_num=real_num+buff_num 614 elif not ampl_coeff[1] and not ampb_coeff[1]: 615 real_num=real_num+buff_num 616 elif not ampl_coeff[1] and ampb_coeff[1]: 617 imag_num=imag_num-buff_num 618 else: 619 imag_num=imag_num+buff_num 620 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 621 "color matrix element which has both a real and imaginary part." 622 if imag_num!=0: 623 res=fractions.Fraction(imag_num,common_denom) 624 line_num.append(res.numerator) 625 # Negative denominator means imaginary color coef of the 626 # final color matrix 627 line_denom.append(res.denominator*-1) 628 else: 629 res=fractions.Fraction(real_num,common_denom) 630 line_num.append(res.numerator) 631 # Positive denominator means real color coef of the final color matrix 632 line_denom.append(res.denominator) 633 634 ColorMatrixNumOutput.append(line_num) 635 ColorMatrixDenomOutput.append(line_denom) 636 637 if time_info: 638 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 639 "%d-%m-%Y %H:%M")) 640 if progress_bar!=None: 641 progress_bar.finish() 642 643 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
644
645 - def get_context(self,matrix_element):
646 """ Returns the contextual variables which need to be set when 647 pre-processing the template files.""" 648 649 # The nSquaredSO entry of the general replace dictionary should have 650 # been set in write_loopmatrix prior to the first call to this function 651 # However, for cases where the TIRCaching contextual variable is 652 # irrelevant (like in the default output), this might not be the case 653 # so we set it to 1. 654 try: 655 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 656 except (KeyError, AttributeError): 657 n_squared_split_orders = 1 658 659 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 660 661 # Force the computation of loop color flows for loop_induced processes 662 ComputeColorFlows = self.compute_color_flows or LoopInduced 663 # The variable AmplitudeReduction is just to make the contextual 664 # conditions more readable in the include files. 665 AmplitudeReduction = LoopInduced or ComputeColorFlows 666 # Even when not reducing at the amplitude level, the TIR caching 667 # is useful when there is more than one squared split order config. 668 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 669 MadEventOutput = False 670 671 return {'LoopInduced': LoopInduced, 672 'ComputeColorFlows': ComputeColorFlows, 673 'AmplitudeReduction': AmplitudeReduction, 674 'TIRCaching': TIRCaching, 675 'MadEventOutput': MadEventOutput}
676 677 #=========================================================================== 678 # generate_subprocess_directory_v4 679 #===========================================================================
680 - def generate_loop_subprocess(self, matrix_element, fortran_model, 681 group_number = None, proc_id = None, config_map=None):
682 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 683 including the necessary loop_matrix.f, born_matrix.f and include files. 684 Notice that this is too different from generate_subprocess_directory_v4 685 so that there is no point reusing this mother function. 686 The 'group_number' and 'proc_id' options are only used for the LoopInduced 687 MadEvent output and only to specify the ME_identifier and the P* 688 SubProcess directory name.""" 689 690 cwd = os.getcwd() 691 proc_dir_name = self.get_SubProc_folder_name( 692 matrix_element.get('processes')[0],group_number,proc_id) 693 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 694 695 try: 696 os.mkdir(dirpath) 697 except os.error as error: 698 logger.warning(error.strerror + " " + dirpath) 699 700 try: 701 os.chdir(dirpath) 702 except os.error: 703 logger.error('Could not cd to directory %s' % dirpath) 704 return 0 705 706 logger.info('Creating files in directory %s' % dirpath) 707 708 # Extract number of external particles 709 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 710 711 calls=self.write_loop_matrix_element_v4(None,matrix_element, 712 fortran_model, group_number = group_number, 713 proc_id = proc_id, config_map = config_map) 714 715 # We assume here that all processes must share the same property of 716 # having a born or not, which must be true anyway since these are two 717 # definite different classes of processes which can never be treated on 718 # the same footing. 719 if matrix_element.get('processes')[0].get('has_born'): 720 filename = 'born_matrix.f' 721 calls = self.write_bornmatrix( 722 writers.FortranWriter(filename), 723 matrix_element, 724 fortran_model) 725 726 filename = 'pmass.inc' 727 self.write_pmass_file(writers.FortranWriter(filename), 728 matrix_element) 729 730 filename = 'ngraphs.inc' 731 self.write_ngraphs_file(writers.FortranWriter(filename), 732 len(matrix_element.get_all_amplitudes())) 733 734 # Do not draw the loop diagrams if they are too many. 735 # The user can always decide to do it manually, if really needed 736 loop_diags = [loop_diag for loop_diag in\ 737 matrix_element.get('base_amplitude').get('loop_diagrams')\ 738 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 739 if len(loop_diags)>5000: 740 logger.info("There are more than 5000 loop diagrams."+\ 741 "Only the first 5000 are drawn.") 742 filename = "loop_matrix.ps" 743 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 744 loop_diags[:5000]),filename, 745 model=matrix_element.get('processes')[0].get('model'),amplitude='') 746 logger.info("Drawing loop Feynman diagrams for " + \ 747 matrix_element.get('processes')[0].nice_string()) 748 plot.draw() 749 750 if matrix_element.get('processes')[0].get('has_born'): 751 filename = "born_matrix.ps" 752 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 753 get('born_diagrams'), 754 filename, 755 model=matrix_element.get('processes')[0].\ 756 get('model'), 757 amplitude='') 758 logger.info("Generating born Feynman diagrams for " + \ 759 matrix_element.get('processes')[0].nice_string(\ 760 print_weighted=False)) 761 plot.draw() 762 763 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 764 matrix_element.get('processes')[0],group_number,proc_id)) 765 766 # Return to original PWD 767 os.chdir(cwd) 768 769 if not calls: 770 calls = 0 771 return calls
772 793
794 - def generate_general_replace_dict(self,matrix_element, 795 group_number = None, proc_id = None):
796 """Generates the entries for the general replacement dictionary used 797 for the different output codes for this exporter.The arguments 798 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 799 800 dict={} 801 # A general process prefix which appears in front of all MadLooop 802 # subroutines and common block so that several processes can be compiled 803 # together into one library, as necessary to follow BLHA guidelines. 804 805 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 806 group_number = group_number, group_elem_number = proc_id) 807 808 # The proc_id is used for MadEvent grouping, so none of our concern here 809 # and it is simply set to an empty string. 810 dict['proc_id'] = '' 811 # Extract version number and date from VERSION file 812 info_lines = self.get_mg5_info_lines() 813 dict['info_lines'] = info_lines 814 # Extract process info lines 815 process_lines = self.get_process_info_lines(matrix_element) 816 dict['process_lines'] = process_lines 817 # Extract number of external particles 818 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 819 dict['nexternal'] = nexternal 820 dict['nincoming'] = ninitial 821 # Extract ncomb 822 ncomb = matrix_element.get_helicity_combinations() 823 dict['ncomb'] = ncomb 824 # Extract nloopamps 825 nloopamps = matrix_element.get_number_of_loop_amplitudes() 826 dict['nloopamps'] = nloopamps 827 # Extract nloopdiags 828 nloopdiags = len(matrix_element.get('diagrams')) 829 dict['nloopdiags'] = nloopdiags 830 # Extract nctamps 831 nctamps = matrix_element.get_number_of_CT_amplitudes() 832 dict['nctamps'] = nctamps 833 # Extract nwavefuncs 834 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 835 dict['nwavefuncs'] = nwavefuncs 836 # Set format of the double precision 837 dict['real_dp_format']='real*8' 838 dict['real_mp_format']='real*16' 839 # Set format of the complex 840 dict['complex_dp_format']='complex*16' 841 dict['complex_mp_format']='complex*32' 842 # Set format of the masses 843 dict['mass_dp_format'] = dict['complex_dp_format'] 844 dict['mass_mp_format'] = dict['complex_mp_format'] 845 # Fill in default values for the placeholders for the madevent 846 # loop-induced output 847 dict['nmultichannels'] = 0 848 dict['nmultichannel_configs'] = 0 849 dict['config_map_definition'] = '' 850 dict['config_index_map_definition'] = '' 851 # Color matrix size 852 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 853 # it is NLOOPAMPSxNBORNAMPS 854 # Also, how to access the number of Born squared order contributions 855 856 if matrix_element.get('processes')[0].get('has_born'): 857 dict['color_matrix_size'] = 'nbornamps' 858 dict['get_nsqso_born']=\ 859 "include 'nsqso_born.inc'" 860 else: 861 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 862 PARAMETER (NSQSO_BORN=0) 863 """ 864 dict['color_matrix_size'] = 'nloopamps' 865 866 # These placeholders help to have as many common templates for the 867 # output of the loop induced processes and those with a born 868 # contribution. 
869 if matrix_element.get('processes')[0].get('has_born'): 870 # Extract nbornamps 871 nbornamps = matrix_element.get_number_of_born_amplitudes() 872 dict['nbornamps'] = nbornamps 873 dict['ncomb_helas_objs'] = ',ncomb' 874 dict['nbornamps_decl'] = \ 875 """INTEGER NBORNAMPS 876 PARAMETER (NBORNAMPS=%d)"""%nbornamps 877 dict['nBornAmps'] = nbornamps 878 879 else: 880 dict['ncomb_helas_objs'] = '' 881 dict['dp_born_amps_decl'] = '' 882 dict['dp_born_amps_decl_in_mp'] = '' 883 dict['copy_mp_to_dp_born_amps'] = '' 884 dict['mp_born_amps_decl'] = '' 885 dict['nbornamps_decl'] = '' 886 dict['nbornamps'] = 0 887 dict['nBornAmps'] = 0 888 889 return dict
890
891 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 892 group_number = None, proc_id = None, config_map = None):
893 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 894 mp_born_amps_and_wfs. 895 The arguments group_number and proc_id are just for the LoopInduced 896 output with MadEvent and only used in get_ME_identifier. 897 """ 898 899 # Create the necessary files for the loop matrix element subroutine 900 901 if config_map: 902 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 903 'MadEvent and cannot compute the AMP2 for multi-channeling.' 904 905 if not isinstance(fortran_model,\ 906 helas_call_writers.FortranUFOHelasCallWriter): 907 raise MadGraph5Error, 'The loop fortran output can only'+\ 908 ' work with a UFO Fortran model' 909 910 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 911 argument=fortran_model.get('model'), 912 hel_sum=matrix_element.get('processes')[0].get('has_born')) 913 914 # Compute the analytical information of the loop wavefunctions in the 915 # loop helas matrix elements using the cached aloha model to reuse 916 # as much as possible the aloha computations already performed for 917 # writing out the aloha fortran subroutines. 918 matrix_element.compute_all_analytic_information( 919 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 920 921 # Initialize a general replacement dictionary with entries common to 922 # many files generated here. 923 matrix_element.rep_dict = self.generate_general_replace_dict( 924 matrix_element, group_number = group_number, proc_id = proc_id) 925 926 # Extract max number of loop couplings (specific to this output type) 927 matrix_element.rep_dict['maxlcouplings']= \ 928 matrix_element.find_max_loop_coupling() 929 # The born amp declaration suited for also outputing the loop-induced 930 # processes as well. 931 if matrix_element.get('processes')[0].get('has_born'): 932 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 933 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 934 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 935 matrix_element.rep_dict['dp_born_amps_decl'] = \ 936 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 937 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 938 matrix_element.rep_dict['mp_born_amps_decl'] = \ 939 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 940 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 941 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 942 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 943 944 if writer: 945 raise MadGraph5Error, 'Matrix output mode no longer supported.' 
946 947 filename = 'loop_matrix.f' 948 calls = self.write_loopmatrix(writers.FortranWriter(filename), 949 matrix_element, 950 LoopFortranModel) 951 952 # Write out the proc_prefix in a file, this is quite handy 953 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 954 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 955 proc_prefix_writer.close() 956 957 filename = 'check_sa.f' 958 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 959 960 filename = 'CT_interface.f' 961 self.write_CT_interface(writers.FortranWriter(filename),\ 962 matrix_element) 963 964 965 966 filename = 'improve_ps.f' 967 calls = self.write_improve_ps(writers.FortranWriter(filename), 968 matrix_element) 969 970 filename = 'loop_num.f' 971 self.write_loop_num(writers.FortranWriter(filename),\ 972 matrix_element,LoopFortranModel) 973 974 filename = 'mp_born_amps_and_wfs.f' 975 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 976 matrix_element,LoopFortranModel) 977 978 # Extract number of external particles 979 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 980 filename = 'nexternal.inc' 981 self.write_nexternal_file(writers.FortranWriter(filename), 982 nexternal, ninitial) 983 984 filename = 'process_info.inc' 985 self.write_process_info_file(writers.FortranWriter(filename), 986 matrix_element) 987 return calls
988
989 - def write_process_info_file(self, writer, matrix_element):
990 """A small structural function to write the include file specifying some 991 process characteristics.""" 992 993 model = matrix_element.get('processes')[0].get('model') 994 process_info = {} 995 # The maximum spin of any particle connected (or directly running in) 996 # any loop of this matrix element. This is important because there is 997 # some limitation in the stability tests that can be performed when this 998 # maximum spin is above 3 (vectors). Also CutTools has limitations in 999 # this regard. 1000 process_info['max_spin_connected_to_loop']=\ 1001 matrix_element.get_max_spin_connected_to_loop() 1002 1003 process_info['max_spin_external_particle']= max( 1004 model.get_particle(l.get('id')).get('spin') for l in 1005 matrix_element.get('processes')[0].get('legs')) 1006 1007 proc_include = \ 1008 """ 1009 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1010 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1011 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1012 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1013 """%process_info 1014 1015 writer.writelines(proc_include)
1016
1017 - def generate_subprocess_directory_v4(self, matrix_element, fortran_model):
1018 """ To overload the default name for this function such that the correct 1019 function is used when called from the command interface """ 1020 1021 return self.generate_loop_subprocess(matrix_element,fortran_model)
1022
1023 - def write_check_sa(self, writer, matrix_element):
1024 """Writes out the steering code check_sa. In the optimized output mode, 1025 All the necessary entries in the replace_dictionary have already been 1026 set in write_loopmatrix because it is only there that one has access to 1027 the information about split orders.""" 1028 replace_dict = copy.copy(matrix_element.rep_dict) 1029 for key in ['print_so_born_results','print_so_loop_results', 1030 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1031 if key not in replace_dict.keys(): 1032 replace_dict[key]='' 1033 1034 if matrix_element.get('processes')[0].get('has_born'): 1035 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1036 else: 1037 file = open(os.path.join(self.template_dir,\ 1038 'check_sa_loop_induced.inc')).read() 1039 file=file%replace_dict 1040 writer.writelines(file) 1041 1042 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1043 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1044 return 1045 file = open(os.path.join(self.template_dir,\ 1046 'check_py.f.inc')).read() 1047 file=file%replace_dict 1048 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1049 new_writer = writer.__class__(new_path, 'w') 1050 new_writer.writelines(file) 1051 1052 file = open(os.path.join(self.template_dir,\ 1053 'check_sa.py.inc')).read() 1054 # For now just put in an empty PS point but in the future, maybe generate 1055 # a valid one already here by default 1056 curr_proc = matrix_element.get('processes')[0] 1057 random_PSpoint_python_formatted = \ 1058 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1059 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1060 1061 process_definition_string = curr_proc.nice_string().replace('Process:','') 1062 file=file.format(random_PSpoint_python_formatted,process_definition_string) 1063 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1064 new_writer = open(new_path, 'w') 1065 new_writer.writelines(file) 1066 # Make it executable 1067 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1068
1069 - def write_improve_ps(self, writer, matrix_element):
1070 """ Write out the improve_ps subroutines which modify the PS point 1071 given in input and slightly deform it to achieve exact onshellness on 1072 all external particles as well as perfect energy-momentum conservation""" 1073 replace_dict = copy.copy(matrix_element.rep_dict) 1074 1075 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1076 replace_dict['ninitial']=ninitial 1077 mass_list=matrix_element.get_external_masses()[:-2] 1078 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1079 1080 # Write the quadruple precision version of this routine only. 1081 replace_dict['real_format']=replace_dict['real_mp_format'] 1082 replace_dict['mp_prefix']='MP_' 1083 replace_dict['exp_letter']='e' 1084 replace_dict['mp_specifier']='_16' 1085 replace_dict['coupl_inc_name']='mp_coupl.inc' 1086 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1087 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1088 i, m in enumerate(mass_list)]) 1089 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1090 file_mp=file_mp%replace_dict 1091 # 1092 writer.writelines(file_mp)
1093
1094 - def write_loop_num(self, writer, matrix_element,fortran_model):
1095 """ Create the file containing the core subroutine called by CutTools 1096 which contains the Helas calls building the loop""" 1097 1098 if not matrix_element.get('processes') or \ 1099 not matrix_element.get('diagrams'): 1100 return 0 1101 1102 # Set lowercase/uppercase Fortran code 1103 writers.FortranWriter.downcase = False 1104 1105 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1106 1107 replace_dict = copy.copy(matrix_element.rep_dict) 1108 1109 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1110 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1111 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1112 1113 # The squaring is only necessary for the processes with born where the 1114 # sum over helicities is done before sending the numerator to CT. 1115 dp_squaring_lines=['DO I=1,NBORNAMPS', 1116 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1117 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1118 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1119 mp_squaring_lines=['DO I=1,NBORNAMPS', 1120 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1121 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1122 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1123 if matrix_element.get('processes')[0].get('has_born'): 1124 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1125 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1126 else: 1127 replace_dict['dp_squaring']='RES=BUFF' 1128 replace_dict['mp_squaring']='QPRES=BUFF' 1129 1130 # Prepend MP_ to all helas calls. 1131 self.turn_to_mp_calls(loop_helas_calls) 1132 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1133 1134 file=file%replace_dict 1135 1136 if writer: 1137 writer.writelines(file) 1138 else: 1139 return file
1140
1141 - def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1142 """ Create the file CT_interface.f which contains the subroutine defining 1143 the loop HELAS-like calls along with the general interfacing subroutine. 1144 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1145 1146 files=[] 1147 1148 # First write CT_interface which interfaces MG5 with CutTools. 1149 replace_dict=copy.copy(matrix_element.rep_dict) 1150 1151 # We finalize CT result differently wether we used the built-in 1152 # squaring against the born. 1153 if matrix_element.get('processes')[0].get('has_born'): 1154 replace_dict['finalize_CT']='\n'.join([\ 1155 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1156 else: 1157 replace_dict['finalize_CT']='\n'.join([\ 1158 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1159 1160 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1161 1162 file = file % replace_dict 1163 files.append(file) 1164 1165 # Now collect the different kind of subroutines needed for the 1166 # loop HELAS-like calls. 1167 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1168 1169 for callkey in HelasLoopAmpsCallKeys: 1170 replace_dict=copy.copy(matrix_element.rep_dict) 1171 # Add to this dictionary all other attribute common to all 1172 # HELAS-like loop subroutines. 1173 if matrix_element.get('processes')[0].get('has_born'): 1174 replace_dict['validh_or_nothing']=',validh' 1175 else: 1176 replace_dict['validh_or_nothing']='' 1177 # In the optimized output, the number of couplings in the loop is 1178 # not specified so we only treat it here if necessary: 1179 if len(callkey)>2: 1180 replace_dict['ncplsargs']=callkey[2] 1181 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1182 replace_dict['cplsargs']=cplsargs 1183 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1184 replace_dict['cplsdecl']=cplsdecl 1185 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1186 replace_dict['mp_cplsdecl']=mp_cplsdecl 1187 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1188 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1189 for i in range(1,callkey[2]+1)]) 1190 replace_dict['cplset']=cplset 1191 1192 replace_dict['nloopline']=callkey[0] 1193 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1194 replace_dict['wfsargs']=wfsargs 1195 # We don't pass the multiple precision mass in the optimized_output 1196 if not optimized_output: 1197 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1198 else: 1199 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1200 replace_dict['margs']=margs 1201 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1202 replace_dict['wfsargsdecl']=wfsargsdecl 1203 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1204 replace_dict['margsdecl']=margsdecl 1205 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1206 replace_dict['mp_margsdecl']=mp_margsdecl 1207 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1208 i in range(1,callkey[1]+1)]) 1209 replace_dict['weset']=weset 1210 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1211 replace_dict['weset']=weset 1212 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1213 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1214 i in range(2,callkey[0]+1)]) 1215 replace_dict['mset']=mset 1216 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1217 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1218 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1219 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1220 i in range(3,callkey[0]+3)]) 1221 replace_dict['mset2']=mset2 1222 replace_dict['nwfsargs'] = callkey[1] 1223 if callkey[0]==callkey[1]: 1224 replace_dict['nwfsargs_header'] = "" 1225 replace_dict['pairingargs']="" 1226 replace_dict['pairingdecl']="" 1227 pairingset="""DO I=1,NLOOPLINE 1228 PAIRING(I)=1 1229 ENDDO 1230 """ 1231 replace_dict['pairingset']=pairingset 1232 else: 1233 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1234 pairingargs="".join([("P"+str(i)+", ") for i in \ 1235 range(1,callkey[0]+1)]) 1236 replace_dict['pairingargs']=pairingargs 1237 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1238 range(1,callkey[0]+1)])[:-2] 1239 replace_dict['pairingdecl']=pairingdecl 1240 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1241 i in range(1,callkey[0]+1)]) 1242 replace_dict['pairingset']=pairingset 1243 1244 file = open(os.path.join(self.template_dir,\ 1245 'helas_loop_amplitude.inc')).read() 1246 file = file % replace_dict 1247 files.append(file) 1248 1249 file="\n".join(files) 1250 1251 if writer: 1252 writer.writelines(file,context=self.get_context(matrix_element)) 1253 else: 1254 return file
1255 1256 # Helper function to split HELAS CALLS in dedicated subroutines placed 1257 # in different files.
1258 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1259 helas_calls, entry_name, bunch_name,n_helas=2000, 1260 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1261 continue_label = 1000, momenta_array_name='P', 1262 context={}):
1263 """ Finish the code generation with splitting. 1264 Split the helas calls in the argument helas_calls into bunches of 1265 size n_helas and place them in dedicated subroutine with name 1266 <bunch_name>_i. Also setup the corresponding calls to these subroutine 1267 in the replace_dict dictionary under the entry entry_name. 1268 The context specified will be forwarded to the the fileWriter.""" 1269 helascalls_replace_dict=copy.copy(replace_dict) 1270 helascalls_replace_dict['bunch_name']=bunch_name 1271 helascalls_files=[] 1272 for i, k in enumerate(range(0, len(helas_calls), n_helas)): 1273 helascalls_replace_dict['bunch_number']=i+1 1274 helascalls_replace_dict['helas_calls']=\ 1275 '\n'.join(helas_calls[k:k + n_helas]) 1276 helascalls_replace_dict['required_so_broadcaster']=\ 1277 required_so_broadcaster 1278 helascalls_replace_dict['continue_label']=continue_label 1279 new_helascalls_file = open(os.path.join(self.template_dir,\ 1280 template_name)).read() 1281 new_helascalls_file = new_helascalls_file % helascalls_replace_dict 1282 helascalls_files.append(new_helascalls_file) 1283 # Setup the call to these HELASCALLS subroutines in loop_matrix.f 1284 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 1285 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 1286 for a in range(len(helascalls_files))] 1287 replace_dict[entry_name]='\n'.join(helascalls_calls) 1288 if writer: 1289 for i, helascalls_file in enumerate(helascalls_files): 1290 filename = '%s_%d.f'%(bunch_name,i+1) 1291 writers.FortranWriter(filename).writelines(helascalls_file, 1292 context=context) 1293 else: 1294 masterfile='\n'.join([masterfile,]+helascalls_files) 1295 1296 return masterfile
1297
1298 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1299 noSplit=False):
1300 """Create the loop_matrix.f file.""" 1301 1302 if not matrix_element.get('processes') or \ 1303 not matrix_element.get('diagrams'): 1304 return 0 1305 1306 # Set lowercase/uppercase Fortran code 1307 1308 writers.FortranWriter.downcase = False 1309 1310 replace_dict = copy.copy(matrix_element.rep_dict) 1311 1312 # Extract overall denominator 1313 # Averaging initial state color, spin, and identical FS particles 1314 den_factor_line = self.get_den_factor_line(matrix_element) 1315 replace_dict['den_factor_line'] = den_factor_line 1316 # When the user asks for the polarized matrix element we must 1317 # multiply back by the helicity averaging factor 1318 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1319 1320 # These entries are specific for the output for loop-induced processes 1321 # Also sets here the details of the squaring of the loop ampltiudes 1322 # with the born or the loop ones. 1323 if not matrix_element.get('processes')[0].get('has_born'): 1324 replace_dict['compute_born']=\ 1325 """C There is of course no born for loop induced processes 1326 ANS(0)=0.0d0 1327 """ 1328 replace_dict['set_reference']='\n'.join([ 1329 'C For loop-induced, the reference for comparison is set later'+\ 1330 ' from the total contribution of the previous PS point considered.', 1331 'C But you can edit here the value to be used for the first PS point.', 1332 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1333 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1334 replace_dict['loop_induced_setup'] = '\n'.join([ 1335 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1336 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1337 replace_dict['loop_induced_finalize'] = \ 1338 ("""DO I=NCTAMPS+1,NLOOPAMPS 1339 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1340 WRITE(*,*) '##W03 WARNING Contribution ',I 1341 WRITE(*,*) ' is unstable for helicity ',H 1342 ENDIF 1343 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1344 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1345 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1346 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1347 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1348 C ENDIF 1349 ENDDO 1350 1227 CONTINUE 1351 HELPICKED=HELPICKED_BU""")%replace_dict 1352 replace_dict['loop_helas_calls']="" 1353 replace_dict['nctamps_or_nloopamps']='nloopamps' 1354 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1355 replace_dict['squaring']=\ 1356 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1357 IF (J.EQ.1) THEN 1358 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1359 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1360 ENDIF""" 1361 else: 1362 replace_dict['compute_born']=\ 1363 """C Compute the born, for a specific helicity if asked so. 
1364 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1365 """%matrix_element.rep_dict 1366 replace_dict['set_reference']=\ 1367 """C We chose to use the born evaluation for the reference 1368 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1369 replace_dict['loop_induced_helas_calls'] = "" 1370 replace_dict['loop_induced_finalize'] = "" 1371 replace_dict['loop_induced_setup'] = "" 1372 replace_dict['nctamps_or_nloopamps']='nctamps' 1373 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1374 replace_dict['squaring']='\n'.join(['DO K=1,3', 1375 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1376 'ENDDO']) 1377 1378 # Write a dummy nsquaredSO.inc which is used in the default 1379 # loop_matrix.f code (even though it does not support split orders evals) 1380 # just to comply with the syntax expected from the external code using MadLoop. 1381 writers.FortranWriter('nsquaredSO.inc').writelines( 1382 """INTEGER NSQUAREDSO 1383 PARAMETER (NSQUAREDSO=0)""") 1384 1385 # Actualize results from the loops computed. Only necessary for 1386 # processes with a born. 1387 actualize_ans=[] 1388 if matrix_element.get('processes')[0].get('has_born'): 1389 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1390 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1391 in range(1,4)) 1392 actualize_ans.append(\ 1393 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1394 actualize_ans.append(\ 1395 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1396 actualize_ans.extend(["ENDIF","ENDDO"]) 1397 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1398 else: 1399 replace_dict['actualize_ans']=\ 1400 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1401 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1402 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1403 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1404 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1405 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1406 C ENDIF""")%replace_dict 1407 1408 # Write out the color matrix 1409 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1410 CMWriter=open(pjoin('..','MadLoop5_resources', 1411 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1412 for ColorLine in CMNum: 1413 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1414 CMWriter.close() 1415 CMWriter=open(pjoin('..','MadLoop5_resources', 1416 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1417 for ColorLine in CMDenom: 1418 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1419 CMWriter.close() 1420 1421 # Write out the helicity configurations 1422 HelConfigs=matrix_element.get_helicity_matrix() 1423 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1424 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1425 for HelConfig in HelConfigs: 1426 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1427 HelConfigWriter.close() 1428 1429 # Extract helas calls 1430 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1431 matrix_element) 1432 # The proc_prefix must be replaced 1433 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1434 for lc in loop_amp_helas_calls] 1435 1436 born_ct_helas_calls, UVCT_helas_calls = \ 1437 fortran_model.get_born_ct_helas_calls(matrix_element) 1438 # In the default output, we do not need to separate these two kind of 1439 # contributions 1440 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1441 file = open(os.path.join(self.template_dir,\ 1442 1443 'loop_matrix_standalone.inc')).read() 1444 1445 if matrix_element.get('processes')[0].get('has_born'): 1446 toBeRepaced='loop_helas_calls' 1447 else: 1448 toBeRepaced='loop_induced_helas_calls' 1449 1450 # Decide here wether we need to split the loop_matrix.f file or not. 1451 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1452 file=self.split_HELASCALLS(writer,replace_dict,\ 1453 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1454 'born_ct_helas_calls','helas_calls_ampb') 1455 file=self.split_HELASCALLS(writer,replace_dict,\ 1456 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1457 toBeRepaced,'helas_calls_ampl') 1458 else: 1459 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1460 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1461 1462 file = file % replace_dict 1463 1464 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1465 n_loop_calls = len(filter(lambda call: 1466 not loop_calls_finder.match(call) is None, loop_amp_helas_calls)) 1467 if writer: 1468 # Write the file 1469 writer.writelines(file) 1470 return n_loop_calls 1471 else: 1472 # Return it to be written along with the others 1473 return n_loop_calls, file
1474
1475 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1476          """Create the born_matrix.f file for the born process as for a standard
1477          tree-level computation."""
1478 
1479          if not matrix_element.get('processes') or \
1480             not matrix_element.get('diagrams'):
1481              return 0
1482 
1483          if not isinstance(writer, writers.FortranWriter):
1484              raise writers.FortranWriter.FortranWriterError(\
1485                  "writer not FortranWriter")
1486 
1487          # For now, we can use the exact same treatment as for tree-level
1488          # computations by redefining here a regular HelasMatrixElement for the
1489          # born process.
1490          # It is important to make a deepcopy, as we don't want any possible
1491          # treatment on the objects of the bornME to have side effects on
1492          # the content of the LoopHelasMatrixElement object.
1493          bornME = helas_objects.HelasMatrixElement()
1494          for prop in bornME.keys():
1495              bornME.set(prop,copy.deepcopy(matrix_element.get(prop)))
1496          bornME.set('base_amplitude',None,force=True)
1497          bornME.set('diagrams',copy.deepcopy(\
1498                                      matrix_element.get_born_diagrams()))
1499          bornME.set('color_basis',copy.deepcopy(\
1500                                      matrix_element.get('born_color_basis')))
1501          bornME.set('color_matrix',copy.deepcopy(\
1502                          color_amp.ColorMatrix(bornME.get('color_basis'))))
1503          # This is to decide whether or not to reuse old wavefunctions to store
1504          # new ones (provided they are not used further in the code).
1505          bornME.optimization = True
1506          return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4(
1507                      writer, bornME, fortran_model,
1508                      proc_prefix=matrix_element.rep_dict['proc_prefix'])
1509
1510 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1511 noSplit=False):
1512 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1513 computes just the external wavefunction and born amplitudes in 1514 multiple precision. """ 1515 1516 if not matrix_element.get('processes') or \ 1517 not matrix_element.get('diagrams'): 1518 return 0 1519 1520 replace_dict = copy.copy(matrix_element.rep_dict) 1521 1522 # For the wavefunction copy, check what suffix is needed for the W array 1523 if matrix_element.get('processes')[0].get('has_born'): 1524 replace_dict['h_w_suffix']=',H' 1525 else: 1526 replace_dict['h_w_suffix']='' 1527 1528 # Extract helas calls 1529 born_amps_and_wfs_calls , uvct_amp_calls = \ 1530 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1531 # In the default output, these two kind of contributions do not need to 1532 # be differentiated 1533 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1534 1535 # Turn these HELAS calls to the multiple-precision version of the HELAS 1536 # subroutines. 1537 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1538 1539 file = open(os.path.join(self.template_dir,\ 1540 'mp_born_amps_and_wfs.inc')).read() 1541 # Decide here wether we need to split the loop_matrix.f file or not. 1542 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1543 file=self.split_HELASCALLS(writer,replace_dict,\ 1544 'mp_helas_calls_split.inc',file,\ 1545 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1546 'mp_helas_calls') 1547 else: 1548 replace_dict['born_amps_and_wfs_calls']=\ 1549 '\n'.join(born_amps_and_wfs_calls) 1550 1551 file = file % replace_dict 1552 if writer: 1553 # Write the file 1554 writer.writelines(file) 1555 else: 1556 # Return it to be written along with the others 1557 return file 1558 1559 #=============================================================================== 1560 # LoopProcessOptimizedExporterFortranSA 1561 #=============================================================================== 1562
1563 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1564      """Class to take care of exporting a set of loop matrix elements in the
1565      Fortran format which exploits the Pozzorini method of representing
1566      the loop numerators as polynomials to render their evaluation faster."""
1567 
1568      template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
1569      # The option below controls whether one wants to group together in one single
1570      # CutTools/TIR call the loops with the same denominator structure
1571      forbid_loop_grouping = False
1572 
1573      # List of the potential TIR libraries one may want to link to.
1574      # Golem and Samurai will typically be obtained from gosam_contrib,
1575      # which might also contain a version of ninja. We must therefore
1576      # make sure that ninja appears first in the list of -L because
1577      # it is the tool for which the user is most likely to be
1578      # using a standalone version independent of gosam_contrib
1579      all_tir=['pjfry','iregi','ninja','golem','samurai']
1580 
1581 - def __init__(self, mgme_dir="", dir_path = "", opt=None):
1582          """Initialize the LoopProcessOptimizedExporterFortranSA with directory
1583          information on where to find all the loop-related source files,
1584          such as CutTools and the TIR libraries."""
1585 
1586          super(LoopProcessOptimizedExporterFortranSA,self).__init__(mgme_dir,
1587                                                                 dir_path, opt)
1588 
1589          # Availability flags for the TIR libraries
1590          self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True,
1591                                   'samurai':True,'ninja':True}
1592 
1593          for tir in self.all_tir:
1594              tir_dir="%s_dir"%tir
1595              if tir_dir in self.opt and not self.opt[tir_dir] is None:
1596                  # Resolve a relative 'local path' w.r.t. the current MG5aMC root.
1597                  tir_path = self.opt[tir_dir].strip()
1598                  if tir_path.startswith('.'):
1599                      tir_path = os.path.abspath(pjoin(MG5DIR,tir_path))
1600                  setattr(self,tir_dir,tir_path)
1601              else:
1602                  setattr(self,tir_dir,'')
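# --- Illustrative sketch, not part of the module ----------------------------
# The path resolution applied to the '<tir>_dir' options above: a relative
# path such as './HEPTools/ninja' is interpreted with respect to the MG5aMC
# root directory, while absolute paths are kept as they are. The helper name
# and the example paths are invented.
import os

def resolve_tir_path(tir_path, mg5_root):
    """Mimic the normalisation of a TIR directory option."""
    tir_path = tir_path.strip()
    if tir_path.startswith('.'):
        return os.path.abspath(os.path.join(mg5_root, tir_path))
    return tir_path
# resolve_tir_path('./HEPTools/ninja', '/home/user/MG5_aMC')
#   -> '/home/user/MG5_aMC/HEPTools/ninja'
# -----------------------------------------------------------------------------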
1603
1604 - def copy_v4template(self, modelname = ''):
1605 """Additional actions needed to setup the Template. 1606 """ 1607 1608 super(LoopProcessOptimizedExporterFortranSA, self).copy_v4template( 1609 modelname) 1610 1611 self.loop_optimized_additional_template_setup()
1612
1613 - def get_context(self,matrix_element, **opts):
1614          """ Additional contextual information which needs to be created for
1615          the optimized output."""
1616 
1617          context = LoopProcessExporterFortranSA.get_context(self, matrix_element,
1618                                                                          **opts)
1619 
1620          # Check whether the selected Ninja installation supports quadruple precision
1621          try:
1622              context['ninja_supports_quad_prec'] = \
1623                     misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir'))
1624          except AttributeError:
1625              context['ninja_supports_quad_prec'] = False
1626 
1627          for tir in self.all_tir:
1628              context['%s_available'%tir]=self.tir_available_dict[tir]
1629              # safety check
1630              if tir not in ['golem','pjfry','iregi','samurai','ninja']:
1631                  raise MadGraph5Error,"%s is not a TIR currently interfaced."%tir
1632 
1633          return context
1634
1636 """ Perform additional actions specific for this class when setting 1637 up the template with the copy_v4template function.""" 1638 1639 # We must link the TIR to the Library folder of the active Template 1640 link_tir_libs=[] 1641 tir_libs=[] 1642 tir_include=[] 1643 1644 for tir in self.all_tir: 1645 tir_dir="%s_dir"%tir 1646 libpath=getattr(self,tir_dir) 1647 libname="lib%s.a"%tir 1648 tir_name=tir 1649 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1650 libpath,libname,tir_name=tir_name) 1651 setattr(self,tir_dir,libpath) 1652 if libpath != "": 1653 if tir in ['ninja','pjfry','golem','samurai']: 1654 # It is cleaner to use the original location of the libraries 1655 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1656 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1657 if tir in ['ninja','golem', 'samurai']: 1658 trgt_path = pjoin(os.path.dirname(libpath),'include') 1659 to_include = misc.find_includes_path(trgt_path, 1660 self.include_names[tir]) 1661 if to_include is None: 1662 logger.error( 1663 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1664 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1665 to_include = '<Not_found_define_it_yourself>' 1666 tir_include.append('-I %s'%str(to_include)) 1667 # To be able to easily compile a MadLoop library using 1668 # makefiles built outside of the MG5_aMC framework 1669 # (such as what is done with the Sherpa interface), we 1670 # place here an easy handle on the golem includes 1671 name_map = {'golem':'golem95','samurai':'samurai', 1672 'ninja':'ninja'} 1673 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1674 name='%s_include'%name_map[tir],abspath=True) 1675 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1676 name='%s_lib'%name_map[tir],abspath=True) 1677 else : 1678 link_tir_libs.append('-l%s'%tir) 1679 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1680 1681 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1682 'MadLoop_makefile_definitions') 1683 if os.path.isfile(MadLoop_makefile_definitions): 1684 os.remove(MadLoop_makefile_definitions) 1685 1686 calls = self.write_loop_makefile_definitions( 1687 writers.MakefileWriter(MadLoop_makefile_definitions), 1688 link_tir_libs,tir_libs, tir_include=tir_include)
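# --- Illustrative sketch, not part of the module ----------------------------
# The two linking schemes used above, condensed into a standalone helper.
# TIR libraries interfaced through their original installation directory
# (ninja, pjfry, golem, samurai) get an explicit -L flag, while the remaining
# ones (e.g. iregi) are expected to sit in the process 'lib' folder. The
# example path is invented.
def tir_link_flags(tir, libpath):
    """Return the (linker flag, library file) pair in the style built above,
    or None when the library was not found (empty libpath)."""
    if libpath == "":
        return None
    if tir in ['ninja', 'pjfry', 'golem', 'samurai']:
        # It is cleaner to use the original location of these libraries
        return ('-L%s/ -l%s' % (libpath, tir),
                '%s/lib%s.$(libext)' % (libpath, tir))
    return ('-l%s' % tir, '$(LIBDIR)lib%s.$(libext)' % tir)
# tir_link_flags('ninja', '/opt/gosam_contrib/lib')[0]
#   -> '-L/opt/gosam_contrib/lib/ -lninja'
# -----------------------------------------------------------------------------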
1689 1701 1702 1820
1821 - def set_group_loops(self, matrix_element):
1822 """ Decides whether we must group loops or not for this matrix element""" 1823 1824 # Decide if loops sharing same denominator structures have to be grouped 1825 # together or not. 1826 if self.forbid_loop_grouping: 1827 self.group_loops = False 1828 else: 1829 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 1830 and matrix_element.get('processes')[0].get('has_born') 1831 1832 return self.group_loops
1833
1834 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 1835 group_number = None, proc_id = None, config_map = None):
1836 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 1837 and loop_num.f only but with the optimized FortranModel. 1838 The arguments group_number and proc_id are just for the LoopInduced 1839 output with MadEvent and only used in get_ME_identifier.""" 1840 1841 # Warn the user that the 'matrix' output where all relevant code is 1842 # put together in a single file is not supported in this loop output. 1843 if writer: 1844 raise MadGraph5Error, 'Matrix output mode no longer supported.' 1845 1846 if not isinstance(fortran_model,\ 1847 helas_call_writers.FortranUFOHelasCallWriter): 1848 raise MadGraph5Error, 'The optimized loop fortran output can only'+\ 1849 ' work with a UFO Fortran model' 1850 OptimizedFortranModel=\ 1851 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 1852 fortran_model.get('model'),False) 1853 1854 1855 if not matrix_element.get('processes')[0].get('has_born') and \ 1856 not self.compute_color_flows: 1857 logger.debug("Color flows will be employed despite the option"+\ 1858 " 'loop_color_flows' being set to False because it is necessary"+\ 1859 " for optimizations.") 1860 1861 # Compute the analytical information of the loop wavefunctions in the 1862 # loop helas matrix elements using the cached aloha model to reuse 1863 # as much as possible the aloha computations already performed for 1864 # writing out the aloha fortran subroutines. 1865 matrix_element.compute_all_analytic_information( 1866 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1867 1868 self.set_group_loops(matrix_element) 1869 1870 # Initialize a general replacement dictionary with entries common to 1871 # many files generated here. 1872 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 1873 generate_general_replace_dict(self, matrix_element, 1874 group_number = group_number, proc_id = proc_id) 1875 1876 # and those specific to the optimized output 1877 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 1878 1879 # Create the necessary files for the loop matrix element subroutine 1880 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1881 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1882 proc_prefix_writer.close() 1883 1884 filename = 'loop_matrix.f' 1885 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1886 matrix_element, 1887 OptimizedFortranModel) 1888 1889 filename = 'check_sa.f' 1890 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1891 1892 filename = 'polynomial.f' 1893 calls = self.write_polynomial_subroutines( 1894 writers.FortranWriter(filename), 1895 matrix_element) 1896 1897 filename = 'improve_ps.f' 1898 calls = self.write_improve_ps(writers.FortranWriter(filename), 1899 matrix_element) 1900 1901 filename = 'CT_interface.f' 1902 self.write_CT_interface(writers.FortranWriter(filename),\ 1903 matrix_element) 1904 1905 filename = 'TIR_interface.f' 1906 self.write_TIR_interface(writers.FortranWriter(filename), 1907 matrix_element) 1908 1909 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 1910 filename = 'GOLEM_interface.f' 1911 self.write_GOLEM_interface(writers.FortranWriter(filename), 1912 matrix_element) 1913 1914 filename = 'loop_num.f' 1915 self.write_loop_num(writers.FortranWriter(filename),\ 1916 matrix_element,OptimizedFortranModel) 1917 1918 filename = 'mp_compute_loop_coefs.f' 1919 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 1920 matrix_element,OptimizedFortranModel) 1921 1922 if 
self.get_context(matrix_element)['ComputeColorFlows']: 1923 filename = 'compute_color_flows.f' 1924 self.write_compute_color_flows(writers.FortranWriter(filename), 1925 matrix_element, config_map = config_map) 1926 1927 # Extract number of external particles 1928 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1929 filename = 'nexternal.inc' 1930 self.write_nexternal_file(writers.FortranWriter(filename), 1931 nexternal, ninitial) 1932 1933 # Write general process information 1934 filename = 'process_info.inc' 1935 self.write_process_info_file(writers.FortranWriter(filename), 1936 matrix_element) 1937 1938 if self.get_context(matrix_element)['TIRCaching']: 1939 filename = 'tir_cache_size.inc' 1940 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 1941 1942 return calls
1943
1944 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
1945          """ Specify the entries of the replacement dictionary which are specific
1946          to the optimized output and only relevant to it (the more general entries
1947          are set in the mother class LoopProcessExporterFortranSA)."""
1948 
1949          max_loop_rank=matrix_element.get_max_loop_rank()
1950          matrix_element.rep_dict['maxrank']=max_loop_rank
1951          matrix_element.rep_dict['loop_max_coefs']=\
1952                       q_polynomial.get_number_of_coefs_for_rank(max_loop_rank)
1953          max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank()
1954          matrix_element.rep_dict['vertex_max_coefs']=\
1955                q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank)
1956 
1957          matrix_element.rep_dict['nloopwavefuncs']=\
1958                               matrix_element.get_number_of_loop_wavefunctions()
1959          max_spin=matrix_element.get_max_loop_particle_spin()
1960 
1961          matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16
1962          matrix_element.rep_dict['nloops']=len(\
1963                         [1 for ldiag in matrix_element.get_loop_diagrams() for \
1964                         lamp in ldiag.get_loop_amplitudes()])
1965 
1966          if self.set_group_loops(matrix_element):
1967              matrix_element.rep_dict['nloop_groups']=\
1968                                        len(matrix_element.get('loop_groups'))
1969          else:
1970              matrix_element.rep_dict['nloop_groups']=\
1971                                        matrix_element.rep_dict['nloops']
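# --- Illustrative sketch, not part of the module ----------------------------
# A stand-in for the coefficient counting used above, under the assumption
# that q_polynomial.get_number_of_coefs_for_rank(r) counts the monomials of
# total degree <= r in the four loop-momentum components, i.e. binomial(r+4,4).
def number_of_coefs_for_rank(rank, n_dim=4):
    """Count monomials of total degree <= rank in n_dim variables."""
    total = 0
    for deg in range(rank + 1):
        # monomials of exact degree 'deg': binomial(deg + n_dim - 1, n_dim - 1)
        num, den = 1, 1
        for k in range(1, n_dim):
            num *= (deg + k)
            den *= k
        total += num // den
    return total
# number_of_coefs_for_rank(1) -> 5   (1, q0, q1, q2, q3)
# number_of_coefs_for_rank(2) -> 15
# -----------------------------------------------------------------------------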
1972
1973 - def write_loop_num(self, writer, matrix_element,fortran_model):
1974 """ Create the file containing the core subroutine called by CutTools 1975 which contains the Helas calls building the loop""" 1976 1977 replace_dict=copy.copy(matrix_element.rep_dict) 1978 1979 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1980 file = file % replace_dict 1981 writer.writelines(file,context=self.get_context(matrix_element))
1982
1983 - def write_CT_interface(self, writer, matrix_element):
1984 """ We can re-use the mother one for the loop optimized output.""" 1985 LoopProcessExporterFortranSA.write_CT_interface(\ 1986 self, writer, matrix_element,optimized_output=True)
1987
1988 - def write_TIR_interface(self, writer, matrix_element):
1989 """ Create the file TIR_interface.f which does NOT contain the subroutine 1990 defining the loop HELAS-like calls along with the general interfacing 1991 subroutine. """ 1992 1993 # First write TIR_interface which interfaces MG5 with TIR. 1994 replace_dict=copy.copy(matrix_element.rep_dict) 1995 1996 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 1997 1998 # Check which loops have an Higgs effective vertex so as to correctly 1999 # implement CutTools limitation 2000 loop_groups = matrix_element.get('loop_groups') 2001 has_HEFT_vertex = [False]*len(loop_groups) 2002 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2003 for lamp in loop_amp_list: 2004 final_lwf = lamp.get_final_loop_wavefunction() 2005 while not final_lwf is None: 2006 # We define here an HEFT vertex as any vertex built up from 2007 # only massless vectors and scalars (at least one of each) 2008 scalars = len([1 for wf in final_lwf.get('mothers') if 2009 wf.get('spin')==1]) 2010 vectors = len([1 for wf in final_lwf.get('mothers') if 2011 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2012 if scalars>=1 and vectors>=1 and \ 2013 scalars+vectors == len(final_lwf.get('mothers')): 2014 has_HEFT_vertex[i] = True 2015 break 2016 final_lwf = final_lwf.get_loop_mother() 2017 else: 2018 continue 2019 break 2020 2021 has_HEFT_list = [] 2022 chunk_size = 9 2023 for k in xrange(0, len(has_HEFT_vertex), chunk_size): 2024 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2025 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2026 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2027 has_HEFT_vertex[k:k + chunk_size]]))) 2028 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2029 2030 file = file % replace_dict 2031 2032 FPR = q_polynomial.FortranPolynomialRoutines( 2033 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2034 sub_prefix=replace_dict['proc_prefix']) 2035 if self.tir_available_dict['pjfry']: 2036 file += '\n\n'+FPR.write_pjfry_mapping() 2037 if self.tir_available_dict['iregi']: 2038 file += '\n\n'+FPR.write_iregi_mapping() 2039 2040 if writer: 2041 writer.writelines(file,context=self.get_context(matrix_element)) 2042 else: 2043 return file
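# --- Illustrative sketch, not part of the module ----------------------------
# The chunking of a logical array into Fortran DATA statements, as done just
# above for HAS_AN_HEFT_VERTEX: at most 'chunk_size' entries are placed in
# each DATA statement so the generated lines stay short.
def logical_data_lines(name, flags, chunk_size=9):
    """Return DATA statements initialising the LOGICAL array 'name'."""
    lines = []
    for k in range(0, len(flags), chunk_size):
        lines.append("DATA (%s(I),I=%6r,%6r) /%s/" % (name, k + 1,
            min(k + chunk_size, len(flags)),
            ','.join(['.TRUE.' if f else '.FALSE.'
                      for f in flags[k:k + chunk_size]])))
    return lines
# logical_data_lines('HAS_AN_HEFT_VERTEX', [True, False])
#   -> ['DATA (HAS_AN_HEFT_VERTEX(I),I=     1,     2) /.TRUE.,.FALSE./']
# -----------------------------------------------------------------------------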
2044
2045 - def write_GOLEM_interface(self, writer, matrix_element):
2046 """ Create the file GOLEM_interface.f which does NOT contain the subroutine 2047 defining the loop HELAS-like calls along with the general interfacing 2048 subroutine. """ 2049 2050 # First write GOLEM_interface which interfaces MG5 with TIR. 2051 replace_dict=copy.copy(matrix_element.rep_dict) 2052 2053 # We finalize TIR result differently wether we used the built-in 2054 # squaring against the born. 2055 if not self.get_context(matrix_element)['AmplitudeReduction']: 2056 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 2057 else: 2058 replace_dict['loop_induced_sqsoindex']='' 2059 2060 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 2061 2062 file = file % replace_dict 2063 2064 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2065 coef_format=replace_dict['complex_dp_format'],\ 2066 sub_prefix=replace_dict['proc_prefix']) 2067 2068 file += '\n\n'+FPR.write_golem95_mapping() 2069 2070 if writer: 2071 writer.writelines(file,context=self.get_context(matrix_element)) 2072 else: 2073 return file
2074
2075 - def write_polynomial_subroutines(self,writer,matrix_element):
2076 """ Subroutine to create all the subroutines relevant for handling 2077 the polynomials representing the loop numerator """ 2078 2079 # First create 'loop_max_coefs.inc' 2080 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2081 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2082 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2083 %matrix_element.rep_dict) 2084 2085 # Then coef_specs directly in DHELAS if it does not exist already 2086 # 'coef_specs.inc'. If several processes exported different files there, 2087 # it is fine because the overall maximum value will overwrite it in the 2088 # end 2089 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2090 if not os.path.isfile(coef_specs_path): 2091 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2092 IncWriter.writelines("""INTEGER MAXLWFSIZE 2093 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2094 INTEGER VERTEXMAXCOEFS 2095 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2096 %matrix_element.rep_dict) 2097 IncWriter.close() 2098 2099 # List of all subroutines to place there 2100 subroutines=[] 2101 2102 # Start from the routine in the template 2103 replace_dict = copy.copy(matrix_element.rep_dict) 2104 2105 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2106 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2107 # The double precision version of the basic polynomial routines, such as 2108 # create_loop_coefs 2109 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2110 replace_dict['real_format'] = replace_dict['real_dp_format'] 2111 replace_dict['mp_prefix'] = '' 2112 replace_dict['kind'] = 8 2113 replace_dict['zero_def'] = '0.0d0' 2114 replace_dict['one_def'] = '1.0d0' 2115 dp_routine = dp_routine % replace_dict 2116 # The quadruple precision version of the basic polynomial routines 2117 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2118 replace_dict['real_format'] = replace_dict['real_mp_format'] 2119 replace_dict['mp_prefix'] = 'MP_' 2120 replace_dict['kind'] = 16 2121 replace_dict['zero_def'] = '0.0e0_16' 2122 replace_dict['one_def'] = '1.0e0_16' 2123 mp_routine = mp_routine % replace_dict 2124 subroutines.append(dp_routine) 2125 subroutines.append(mp_routine) 2126 2127 # Initialize the polynomial routine writer 2128 poly_writer=q_polynomial.FortranPolynomialRoutines( 2129 matrix_element.get_max_loop_rank(), 2130 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2131 sub_prefix=replace_dict['proc_prefix'], 2132 proc_prefix=replace_dict['proc_prefix'], 2133 mp_prefix='') 2134 # Write the polynomial constant module common to all 2135 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2136 2137 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2138 matrix_element.get_max_loop_rank(), 2139 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2140 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2141 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2142 # The eval subroutine 2143 subroutines.append(poly_writer.write_polynomial_evaluator()) 2144 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2145 # The add coefs subroutine 2146 subroutines.append(poly_writer.write_add_coefs()) 2147 subroutines.append(mp_poly_writer.write_add_coefs()) 2148 # The merging one for creating the loop coefficients 2149 subroutines.append(poly_writer.write_wl_merger()) 2150 subroutines.append(mp_poly_writer.write_wl_merger()) 2151 for wl_update 
in matrix_element.get_used_wl_updates():
2152              # We pick here the most appropriate way of computing the
2153              # tensor product depending on the rank of the two tensors.
2154              # The various choices below result from a careful comparison of
2155              # the different methods using the valgrind profiler
2156              if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0:
2157                  # If either rank is 0, or if they are both equal to 1,
2158                  # then we are better off using the fully expanded polynomial,
2159                  # and let the compiler optimize it.
2160                  subroutines.append(poly_writer.write_expanded_wl_updater(\
2161                                               wl_update[0],wl_update[1]))
2162                  subroutines.append(mp_poly_writer.write_expanded_wl_updater(\
2163                                               wl_update[0],wl_update[1]))
2164              elif wl_update[0] >= wl_update[1]:
2165                  # If the loop polynomial is larger, then we will filter and loop
2166                  # over the vertex coefficients first. The smallest product for
2167                  # which the routines below could be used is then
2168                  # loop_rank_2 x vertex_rank_1
2169                  subroutines.append(poly_writer.write_compact_wl_updater(\
2170                       wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2171                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2172                       wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2173              else:
2174                  # This happens only when the rank of the updater (vertex coef)
2175                  # is larger than the one of the loop coef and none of them is
2176                  # zero. This never happens in renormalizable theories but it
2177                  # can happen in HEFT or other effective theories. In this
2178                  # case the typical use of this routine is for the product
2179                  # loop_rank_1 x vertex_rank_2
2180                  subroutines.append(poly_writer.write_compact_wl_updater(\
2181                      wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2182                  subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2183                      wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2184 
2185          writer.writelines('\n\n'.join(subroutines),
2186                                      context=self.get_context(matrix_element))
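# --- Illustrative sketch, not part of the module ----------------------------
# The routine-selection logic applied above to each (loop_rank, vertex_rank)
# pair returned by get_used_wl_updates(), expressed as a standalone function
# returning a label for the chosen updater.
def wl_updater_choice(loop_rank, vertex_rank):
    """Return which polynomial-update routine the code above would pick."""
    if loop_rank == vertex_rank == 1 or loop_rank == 0 or vertex_rank == 0:
        return 'expanded updater'
    elif loop_rank >= vertex_rank:
        return 'compact updater, vertex coefficients looped over first'
    else:
        return 'compact updater, vertex coefficients looped over last'
# wl_updater_choice(2, 1) -> 'compact updater, vertex coefficients looped over first'
# wl_updater_choice(0, 3) -> 'expanded updater'
# -----------------------------------------------------------------------------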
2187
2188 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2189 """Create the write_mp_compute_loop_coefs.f file.""" 2190 2191 if not matrix_element.get('processes') or \ 2192 not matrix_element.get('diagrams'): 2193 return 0 2194 2195 # Set lowercase/uppercase Fortran code 2196 2197 writers.FortranWriter.downcase = False 2198 2199 replace_dict = copy.copy(matrix_element.rep_dict) 2200 2201 # Extract helas calls 2202 squared_orders = matrix_element.get_squared_order_contribs() 2203 split_orders = matrix_element.get('processes')[0].get('split_orders') 2204 2205 born_ct_helas_calls , uvct_helas_calls = \ 2206 fortran_model.get_born_ct_helas_calls(matrix_element, 2207 squared_orders=squared_orders, split_orders=split_orders) 2208 self.turn_to_mp_calls(born_ct_helas_calls) 2209 self.turn_to_mp_calls(uvct_helas_calls) 2210 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2211 matrix_element,group_loops=self.group_loops, 2212 squared_orders=squared_orders,split_orders=split_orders) 2213 # The proc_prefix must be replaced 2214 coef_construction = [c % matrix_element.rep_dict for c 2215 in coef_construction] 2216 self.turn_to_mp_calls(coef_construction) 2217 self.turn_to_mp_calls(coef_merging) 2218 2219 file = open(os.path.join(self.template_dir,\ 2220 'mp_compute_loop_coefs.inc')).read() 2221 2222 # Setup the contextual environment which is used in the splitting 2223 # functions below 2224 context = self.get_context(matrix_element) 2225 file=self.split_HELASCALLS(writer,replace_dict,\ 2226 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2227 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2228 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2229 continue_label = 2000, 2230 momenta_array_name = 'MP_P', 2231 context=context) 2232 file=self.split_HELASCALLS(writer,replace_dict,\ 2233 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2234 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2235 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2236 continue_label = 3000, 2237 momenta_array_name = 'MP_P', 2238 context=context) 2239 file=self.split_HELASCALLS(writer,replace_dict,\ 2240 'mp_helas_calls_split.inc',file,coef_construction,\ 2241 'mp_coef_construction','mp_coef_construction', 2242 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2243 continue_label = 4000, 2244 momenta_array_name = 'MP_P', 2245 context=context) 2246 2247 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2248 2249 file = file % replace_dict 2250 2251 # Write the file 2252 writer.writelines(file,context=context)
2253
2254 - def write_color_matrix_data_file(self, writer, col_matrix):
2255 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2256 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2257 2258 res = [] 2259 for line in range(len(col_matrix._col_basis1)): 2260 numerators = [] 2261 denominators = [] 2262 for row in range(len(col_matrix._col_basis2)): 2263 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2264 numerators.append('%6r'%coeff[0].numerator) 2265 denominators.append('%6r'%( 2266 coeff[0].denominator*(-1 if coeff[1] else 1))) 2267 res.append(' '.join(numerators)) 2268 res.append(' '.join(denominators)) 2269 2270 res.append('EOF') 2271 2272 writer.writelines('\n'.join(res))
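# --- Illustrative sketch, not part of the module ----------------------------
# Parsing back the (Loop|Born)ColorFlowMatrix.dat format written above: each
# row of the color matrix occupies two lines (numerators, then denominators
# carrying the sign) and the file is terminated by an 'EOF' line.
import fractions

def read_color_flow_matrix(path):
    """Return the color matrix as a list of rows of Fraction coefficients."""
    with open(path, 'r') as dat:
        lines = [l.strip() for l in dat if l.strip() and l.strip() != 'EOF']
    rows = []
    for i in range(0, len(lines), 2):
        numerators = [int(n) for n in lines[i].split()]
        denominators = [int(d) for d in lines[i + 1].split()]
        rows.append([fractions.Fraction(n, d)
                     for n, d in zip(numerators, denominators)])
    return rows
# -----------------------------------------------------------------------------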
2273
2274 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2275 color_basis):
2276 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2277 list of the color_amplitudes in the argument of this function.""" 2278 2279 my_cs = color.ColorString() 2280 2281 res = [] 2282 2283 for jamp_number, coeff_list in enumerate(color_amplitudes): 2284 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2285 # Order the ColorString so that its ordering is canonical. 2286 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2287 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2288 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2289 # A line element is a tuple (numerator, denominator, amplitude_id) 2290 line_element = [] 2291 2292 for (coefficient, amp_number) in coeff_list: 2293 coef = self.cat_coeff(\ 2294 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2295 line_element.append((coef[0].numerator, 2296 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2297 # Sort them by growing amplitude number 2298 line_element.sort(key=lambda el:el[2]) 2299 2300 for i in range(3): 2301 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2302 2303 res.append('EOF') 2304 writer.writelines('\n'.join(res))
2305
2306 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2307 """Writes the file compute_color_flows.f which uses the AMPL results 2308 from a common block to project them onto the color flow space so as 2309 to compute the JAMP quantities. For loop induced processes, this file 2310 will also contain a subroutine computing AMPL**2 for madevent 2311 multichanneling.""" 2312 2313 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2314 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2315 2316 dat_writer = open(pjoin('..','MadLoop5_resources', 2317 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2318 %matrix_element.rep_dict),'w') 2319 self.write_color_flow_coefs_data_file(dat_writer, 2320 loop_col_amps, matrix_element.get('loop_color_basis')) 2321 dat_writer.close() 2322 2323 dat_writer = open(pjoin('..','MadLoop5_resources', 2324 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2325 %matrix_element.rep_dict),'w') 2326 self.write_color_matrix_data_file(dat_writer, 2327 matrix_element.get('color_matrix')) 2328 dat_writer.close() 2329 2330 if matrix_element.get('processes')[0].get('has_born'): 2331 born_col_amps = matrix_element.get_born_color_amplitudes() 2332 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2333 dat_writer = open(pjoin('..','MadLoop5_resources', 2334 '%(proc_prefix)sBornColorFlowCoefs.dat' 2335 %matrix_element.rep_dict),'w') 2336 self.write_color_flow_coefs_data_file(dat_writer, 2337 born_col_amps, matrix_element.get('loop_color_basis')) 2338 dat_writer.close() 2339 2340 dat_writer = open(pjoin('..','MadLoop5_resources', 2341 '%(proc_prefix)sBornColorFlowMatrix.dat' 2342 %matrix_element.rep_dict),'w') 2343 self.write_color_matrix_data_file(dat_writer, 2344 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2345 dat_writer.close() 2346 else: 2347 matrix_element.rep_dict['nBornFlows'] = 0 2348 2349 replace_dict = copy.copy(matrix_element.rep_dict) 2350 2351 # The following variables only have to be defined for the LoopInduced 2352 # output for madevent. 2353 if self.get_context(matrix_element)['MadEventOutput']: 2354 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2355 else: 2356 replace_dict['config_map_definition'] = '' 2357 replace_dict['config_index_map_definition'] = '' 2358 replace_dict['nmultichannels'] = 0 2359 replace_dict['nmultichannel_configs'] = 0 2360 2361 # The nmultichannels entry will be used in the matrix<i> wrappers as 2362 # well, so we add it to the general_replace_dict too. 2363 matrix_element.rep_dict['nmultichannels'] = \ 2364 replace_dict['nmultichannels'] 2365 matrix_element.rep_dict['nmultichannel_configs'] = \ 2366 replace_dict['nmultichannel_configs'] 2367 2368 2369 file = open(os.path.join(self.template_dir,\ 2370 'compute_color_flows.inc')).read()%replace_dict 2371 2372 writer.writelines(file,context=self.get_context(matrix_element))
2373
2374 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2375          """ If processes with different maximum loop wavefunction sizes or
2376          different maximum loop vertex ranks have to be output together, then
2377          the file 'coef_specs.inc' in the HELAS Source folder must contain the
2378          overall maximum of these quantities. This is not entirely safe, though,
2379          and the user has been appropriately warned at the output stage """
2380 
2381          # Remove the existing link
2382          coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\
2383                                                              'coef_specs.inc')
2384          os.remove(coef_specs_path)
2385 
2386          spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16}
2387          overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin]
2388          overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank(
2389                                                       overall_max_loop_vert_rank)
2390          # Replace it by the appropriate value
2391          IncWriter=writers.FortranWriter(coef_specs_path,'w')
2392          IncWriter.writelines("""INTEGER MAXLWFSIZE
2393                             PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
2394                             INTEGER VERTEXMAXCOEFS
2395                             PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
2396                             %{'max_lwf_size':overall_max_lwf_size,
2397                               'vertex_max_coefs':overall_max_loop_vert_coefs})
2398          IncWriter.close()
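# --- Illustrative sketch, not part of the module ----------------------------
# How the overall values written to 'coef_specs.inc' above could be derived
# when several processes are output together, using the same spin-to-size
# mapping as in the code; the helper name and inputs are invented.
def overall_coef_specs(max_lwf_spins, max_loop_vert_ranks):
    """Return (overall MAXLWFSIZE, overall maximum loop vertex rank)."""
    spin_to_wf_size = {1: 4, 2: 4, 3: 4, 4: 16, 5: 16}
    return (max(spin_to_wf_size[s] for s in max_lwf_spins),
            max(max_loop_vert_ranks))
# overall_coef_specs([3, 5], [1, 2]) -> (16, 2)
# -----------------------------------------------------------------------------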
2399
2400 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2401 split_orders,squared_orders,amps_orders):
2402 """ Sets up the replacement dictionary for the writeout of the steering 2403 file check_sa.f""" 2404 if len(squared_orders)<1: 2405 matrix_element.rep_dict['print_so_loop_results']=\ 2406 "write(*,*) 'No split orders defined.'" 2407 elif len(squared_orders)==1: 2408 matrix_element.rep_dict['set_coupling_target']='' 2409 matrix_element.rep_dict['print_so_loop_results']=\ 2410 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2411 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2412 for i in range(len(split_orders))])) 2413 else: 2414 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2415 '# Here we leave the default target squared split order to -1, meaning that we'+ 2416 ' aim at computing all individual contributions. You can choose otherwise.', 2417 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2418 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2419 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2420 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2421 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2422 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2423 "ELSE", 2424 "write(*,*) ' > accuracy = NA'", 2425 "ENDIF", 2426 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2427 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2428 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2429 ]) for j, so in enumerate(squared_orders)]) 2430 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2431 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2432 ['\n'.join([ 2433 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2434 ['%d'%so_value for so_value in so])), 2435 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2436 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2437 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2438 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2439 ]) for j, so in enumerate(squared_orders)]) 2440 2441 # We must reconstruct here the born squared orders. 
2442 squared_born_so_orders = [] 2443 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2444 for j in range(0,i+1): 2445 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2446 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2447 if not key in squared_born_so_orders: 2448 squared_born_so_orders.append(key) 2449 if len(squared_born_so_orders)<1: 2450 matrix_element.rep_dict['print_so_born_results'] = '' 2451 elif len(squared_born_so_orders)==1: 2452 matrix_element.rep_dict['print_so_born_results'] = \ 2453 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2454 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2455 for i in range(len(split_orders))])) 2456 else: 2457 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2458 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2459 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2460 for j, so in enumerate(squared_born_so_orders)]) 2461 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2462 ['\n'.join([ 2463 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2464 ['%d'%so_value for so_value in so])), 2465 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2466 ]) for j, so in enumerate(squared_born_so_orders)]) 2467 2468 # Add a bottom bar to both print_so_[loop|born]_results 2469 matrix_element.rep_dict['print_so_born_results'] += \ 2470 '\nwrite (*,*) "---------------------------------"' 2471 matrix_element.rep_dict['print_so_loop_results'] += \ 2472 '\nwrite (*,*) "---------------------------------"'
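# --- Illustrative sketch, not part of the module ----------------------------
# The reconstruction of the Born squared split orders performed above,
# isolated on a toy input: each Born amplitude carries a tuple of coupling
# order powers, and squaring amounts to summing the powers of every
# (unordered) pair of amplitudes.
def born_squared_orders(born_amp_orders):
    """Return the distinct order tuples of AMP_i x conjg(AMP_j)."""
    squared = []
    for i, orders_i in enumerate(born_amp_orders):
        for j in range(i + 1):
            key = tuple(o1 + o2 for o1, o2 in
                        zip(orders_i, born_amp_orders[j]))
            if key not in squared:
                squared.append(key)
    return squared
# With split orders (QCD, QED) and two Born amplitudes of orders (2,0), (0,2):
# born_squared_orders([(2, 0), (0, 2)]) -> [(4, 0), (2, 2), (0, 4)]
# -----------------------------------------------------------------------------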
2473
2474 - def write_tir_cache_size_include(self, writer):
2475          """Write the file 'tir_cache_size.inc' which sets the size of the TIR
2476          cache that the user wishes to employ and the default value for it.
2477          This can have an impact on MadLoop speed when using stability checks
2478          but also impacts in a non-negligible way MadLoop's memory footprint.
2479          It is therefore important that the user can choose its size."""
2480 
2481          # For the standalone optimized output, a size of one is necessary.
2482          # The MadLoop+MadEvent output sets it to 2 because it can gain a further
2483          # speed increase with a TIR cache of size 2 due to the structure of the
2484          # calls to MadLoop there.
2485          tir_cache_size = "parameter(TIR_CACHE_SIZE=1)"
2486          writer.writelines(tir_cache_size)
2487
2488 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2489 write_auxiliary_files=True,):
2490 """Create the loop_matrix.f file.""" 2491 2492 if not matrix_element.get('processes') or \ 2493 not matrix_element.get('diagrams'): 2494 return 0 2495 2496 # Set lowercase/uppercase Fortran code 2497 writers.FortranWriter.downcase = False 2498 2499 # Starting off with the treatment of the split_orders since some 2500 # of the information extracted there will come into the 2501 # general_replace_dict. Split orders are abbreviated SO in all the 2502 # keys of the replacement dictionaries. 2503 2504 # Take care of the split_orders 2505 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2506 # Creating here a temporary list containing only the information of 2507 # what are the different squared split orders contributing 2508 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2509 sqso_contribs = [sqso[0] for sqso in squared_orders] 2510 split_orders = matrix_element.get('processes')[0].get('split_orders') 2511 # The entries set in the function below are only for check_sa written 2512 # out in write_loop__matrix_element_v4 (it is however placed here because the 2513 # split order information is only available here). 2514 self.setup_check_sa_replacement_dictionary(matrix_element, 2515 split_orders,sqso_contribs,amps_orders) 2516 2517 # Now recast the split order basis for the loop, born and counterterm 2518 # amplitude into one single splitorderbasis. 2519 overall_so_basis = list(set( 2520 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2521 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2522 # We must re-sort it to make sure it follows an increasing WEIGHT order 2523 order_hierarchy = matrix_element.get('processes')[0]\ 2524 .get('model').get('order_hierarchy') 2525 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2526 set(order_hierarchy.keys()): 2527 overall_so_basis.sort(key= lambda so: 2528 sum([order_hierarchy[split_orders[i]]*order_power for \ 2529 i, order_power in enumerate(so)])) 2530 2531 # Those are additional entries used throughout the different files of 2532 # MadLoop5 2533 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2534 matrix_element.rep_dict['nSO'] = len(split_orders) 2535 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2536 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2537 2538 writers.FortranWriter('nsquaredSO.inc').writelines( 2539 """INTEGER NSQUAREDSO 2540 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2541 2542 replace_dict = copy.copy(matrix_element.rep_dict) 2543 # Build the general array mapping the split orders indices to their 2544 # definition 2545 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2546 overall_so_basis,'AMPSPLITORDERS')) 2547 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2548 sqso_contribs,'SQPLITORDERS')) 2549 2550 # Specify what are the squared split orders selected by the proc def. 2551 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2552 matrix_element.get('processes')[0],sqso_contribs) 2553 2554 # Now we build the different arrays storing the split_orders ID of each 2555 # amp. 
2556 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2557 for SO in amps_orders['loop_amp_orders']: 2558 for amp_number in SO[1]: 2559 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2560 2561 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2562 ampSO_list,'LOOPAMPORDERS')) 2563 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2564 for SO in amps_orders['born_amp_orders']: 2565 for amp_number in SO[1]: 2566 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2567 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2568 ampSO_list,'BORNAMPORDERS')) 2569 2570 # We then go to the TIR setup 2571 # The first entry is the CutTools, we make sure it is available 2572 looplibs_av=['.TRUE.'] 2573 # one should be careful about the order in the following as it must match 2574 # the ordering in MadLoopParamsCard. 2575 for tir_lib in ['pjfry','iregi','golem','samurai','ninja']: 2576 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2577 self.tir_available_dict[tir_lib] else '.FALSE.') 2578 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2579 2580 # Helicity offset convention 2581 # For a given helicity, the attached integer 'i' means 2582 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2583 # to helicity number abs(i+HELOFFSET) 2584 # 'i' == -HELOFFSET -> Helicity is analytically zero 2585 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2586 # If it is zero, it is skipped. 2587 # Typically, the hel_offset is 10000 2588 replace_dict['hel_offset'] = 10000 2589 2590 # Extract overall denominator 2591 # Averaging initial state color, spin, and identical FS particles 2592 den_factor_line = self.get_den_factor_line(matrix_element) 2593 replace_dict['den_factor_line'] = den_factor_line 2594 2595 # When the user asks for the polarized matrix element we must 2596 # multiply back by the helicity averaging factor 2597 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2598 2599 if write_auxiliary_files: 2600 # Write out the color matrix 2601 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2602 CMWriter=open(pjoin('..','MadLoop5_resources', 2603 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2604 for ColorLine in CMNum: 2605 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2606 CMWriter.close() 2607 CMWriter=open(pjoin('..','MadLoop5_resources', 2608 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2609 for ColorLine in CMDenom: 2610 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2611 CMWriter.close() 2612 2613 # Write out the helicity configurations 2614 HelConfigs=matrix_element.get_helicity_matrix() 2615 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2616 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2617 for HelConfig in HelConfigs: 2618 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2619 HelConfigWriter.close() 2620 2621 # Extract helas calls 2622 born_ct_helas_calls, uvct_helas_calls = \ 2623 fortran_model.get_born_ct_helas_calls(matrix_element, 2624 squared_orders=squared_orders,split_orders=split_orders) 2625 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2626 matrix_element,group_loops=self.group_loops, 2627 squared_orders=squared_orders,split_orders=split_orders) 2628 2629 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2630 group_loops=self.group_loops, 2631 
squared_orders=squared_orders, split_orders=split_orders) 2632 # The proc_prefix must be replaced 2633 coef_construction = [c % matrix_element.rep_dict for c 2634 in coef_construction] 2635 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 2636 2637 file = open(os.path.join(self.template_dir,\ 2638 'loop_matrix_standalone.inc')).read() 2639 2640 # Setup the contextual environment which is used in the splitting 2641 # functions below 2642 context = self.get_context(matrix_element) 2643 file=self.split_HELASCALLS(writer,replace_dict,\ 2644 'helas_calls_split.inc',file,born_ct_helas_calls,\ 2645 'born_ct_helas_calls','helas_calls_ampb', 2646 required_so_broadcaster = 'CT_REQ_SO_DONE', 2647 continue_label = 2000, context = context) 2648 file=self.split_HELASCALLS(writer,replace_dict,\ 2649 'helas_calls_split.inc',file,uvct_helas_calls,\ 2650 'uvct_helas_calls','helas_calls_uvct', 2651 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 2652 continue_label = 3000, context=context) 2653 file=self.split_HELASCALLS(writer,replace_dict,\ 2654 'helas_calls_split.inc',file,coef_construction,\ 2655 'coef_construction','coef_construction', 2656 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 2657 continue_label = 4000, context=context) 2658 file=self.split_HELASCALLS(writer,replace_dict,\ 2659 'helas_calls_split.inc',file,loop_CT_calls,\ 2660 'loop_CT_calls','loop_CT_calls', 2661 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 2662 continue_label = 5000, context=context) 2663 2664 # Add the entries above to the general_replace_dict so that it can be 2665 # used by write_mp_compute_loop_coefs later 2666 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 2667 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 2668 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 2669 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 2670 2671 replace_dict['coef_merging']='\n'.join(coef_merging) 2672 2673 file = file % replace_dict 2674 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \ 2675 loop_CT_calls)) 2676 if writer: 2677 # Write the file 2678 writer.writelines(file,context=context) 2679 return number_of_calls 2680 else: 2681 # Return it to be written along with the others 2682 return number_of_calls, file
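# --- Illustrative sketch, not part of the module ----------------------------
# Decoding the helicity filter convention documented above (HELOFFSET=10000):
def decode_helicity_filter(i, hel_offset=10000):
    """Translate the integer attached to a helicity into a readable status."""
    if i < -hel_offset:
        return 'equal, up to a sign, to helicity number %d' % abs(i + hel_offset)
    elif i == -hel_offset:
        return 'analytically zero'
    elif i == 0:
        return 'skipped (zero weight)'
    else:
        return 'contributing with weight %d' % i
# decode_helicity_filter(-10003) -> 'equal, up to a sign, to helicity number 3'
# decode_helicity_filter(-10000) -> 'analytically zero'
# decode_helicity_filter(2)      -> 'contributing with weight 2'
# -----------------------------------------------------------------------------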
2683 
2684  #===============================================================================
2685  # LoopProcessExporterFortranMatchBox
2686  #===============================================================================
2687 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 2688 export_v4.ProcessExporterFortranMatchBox):
2689 """Class to take care of exporting a set of loop matrix elements in the 2690 Fortran format.""" 2691 2692 default_opt = {'clean': False, 'complex_mass':False, 2693 'export_format':'madloop_matchbox', 'mp':True, 2694 'loop_dir':'', 'cuttools_dir':'', 2695 'fortran_compiler':'gfortran', 2696 'output_dependencies':'external', 2697 'sa_symmetry':True} 2698 2699 2700
2701 - def get_color_string_lines(self, matrix_element):
2702 """Return the color matrix definition lines for this matrix element. Split 2703 rows in chunks of size n.""" 2704 2705 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
2706 2707
2708 - def get_JAMP_lines(self, *args, **opts):
2709 """Adding leading color part of the colorflow""" 2710 2711 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
2712
2713 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
2714          """ To avoid mixing notations between Borns and virtuals, we also use the MG5 prefix here """
2715          return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
2716 2717 2718 #=============================================================================== 2719 # LoopInducedExporter 2720 #===============================================================================
2721 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
2722 """ A class to specify all the functions common to LoopInducedExporterMEGroup 2723 and LoopInducedExporterMENoGroup (but not relevant for the original 2724 Madevent exporters)""" 2725 2726 madloop_makefile_name = 'makefile_MadLoop' 2727 2728
2729 - def __init__(self, *args, **opts):
2730 """ Initialize the process, setting the proc characteristics.""" 2731 super(LoopInducedExporterME, self).__init__(*args, **opts) 2732 self.proc_characteristic['loop_induced'] = True
2733
2734 - def get_context(self,*args,**opts):
2735 """ Make sure that the contextual variable MadEventOutput is set to 2736 True for this exporter""" 2737 2738 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 2739 context['MadEventOutput'] = True 2740 return context
2741 2742
2743 - def get_source_libraries_list(self):
2744          """ Returns the list of libraries to be compiled when compiling the
2745          SOURCE directory. It is different for loop_induced processes and
2746          also depends on the value of the 'output_dependencies' option"""
2747 
2748          libraries_list = super(LoopInducedExporterME,self).\
2749                                                     get_source_libraries_list()
2750 
2751          if self.dependencies=='internal':
2752              libraries_list.append('$(LIBDIR)libcts.$(libext)')
2753              libraries_list.append('$(LIBDIR)libiregi.$(libext)')
2754 
2755          return libraries_list
2756 2763
2764 - def copy_v4template(self, *args, **opts):
2765 """Pick the right mother functions 2766 """ 2767 # Call specifically the necessary building functions for the mixed 2768 # template setup for both MadEvent and MadLoop standalone 2769 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 2770 copy_Source_makefile=False) 2771 2772 LoopProcessOptimizedExporterFortranSA.\ 2773 loop_optimized_additional_template_setup(self)
2774 2775 2776 #=========================================================================== 2777 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 2778 #===========================================================================
2779 - def finalize_v4_directory(self, matrix_elements, history = "", makejpg = False, 2780 online = False, compiler='g77'):
2781 """Function to finalize v4 directory, for inheritance. 2782 """ 2783 2784 self.proc_characteristic['loop_induced'] = True
2785 2786 # This can be uncommented if one desires to have the MadLoop 2787 # initialization performed at the end of the output phase. 2788 # Alternatively, one can simply execute the command 'initMadLoop' in 2789 # the madevent interactive interface after the output. 2790 # from madgraph.interface.madevent_interface import MadLoopInitializer 2791 # MadLoopInitializer.init_MadLoop(self.dir_path, 2792 # subproc_prefix=self.SubProc_prefix, MG_options=None) 2793
2794 - def write_tir_cache_size_include(self, writer):
2795          """Write the file 'tir_cache_size.inc' which sets the size of the TIR
2796          cache that the user wishes to employ and the default value for it.
2797          This can have an impact on MadLoop speed when using stability checks
2798          but also impacts in a non-negligible way MadLoop's memory footprint.
2799          It is therefore important that the user can choose its size."""
2800 
2801          # In the case of the MadLoop+MadEvent output, we set it to 2 because we
2802          # gain a further speed increase with a TIR cache of size 2 due to
2803          # the fact that we call MadLoop once per helicity configuration in this
2804          # case.
2805          tir_cache_size = "parameter(TIR_CACHE_SIZE=2)"
2806          writer.writelines(tir_cache_size)
2807
2808 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 2809 proc_id = None, config_map = [], subproc_number = None):
2810 """ Write it the wrapper to call the ML5 subroutine in the library.""" 2811 2812 # Generating the MadEvent wrapping ME's routines 2813 if not matrix_element.get('processes') or \ 2814 not matrix_element.get('diagrams'): 2815 return 0 2816 2817 if not isinstance(writer, writers.FortranWriter): 2818 raise writers.FortranWriter.FortranWriterError(\ 2819 "writer not FortranWriter") 2820 2821 replace_dict = copy.copy(matrix_element.rep_dict) 2822 2823 # Extract version number and date from VERSION file 2824 info_lines = self.get_mg5_info_lines() 2825 replace_dict['info_lines'] = info_lines 2826 2827 # Extract process info lines 2828 process_lines = self.get_process_info_lines(matrix_element) 2829 replace_dict['process_lines'] = process_lines 2830 2831 # Set proc_id 2832 # It can be set to None when write_matrix_element_v4 is called without 2833 # grouping. In this case the subroutine SMATRIX should take an empty 2834 # suffix. 2835 if proc_id is None: 2836 replace_dict['proc_id'] = '' 2837 else: 2838 replace_dict['proc_id'] = proc_id 2839 2840 #set the average over the number of initial helicities 2841 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2842 2843 # Extract helicity lines 2844 helicity_lines = self.get_helicity_lines(matrix_element) 2845 replace_dict['helicity_lines'] = helicity_lines 2846 2847 2848 # Extract ndiags 2849 ndiags = len(matrix_element.get('diagrams')) 2850 replace_dict['ndiags'] = ndiags 2851 2852 # Set define_iconfigs_lines 2853 replace_dict['define_iconfigs_lines'] = \ 2854 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG 2855 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG""" 2856 2857 if proc_id: 2858 # Set lines for subprocess group version 2859 # Set define_iconfigs_lines 2860 replace_dict['define_iconfigs_lines'] += \ 2861 """\nINTEGER SUBDIAG(MAXSPROC),IB(2) 2862 COMMON/TO_SUB_DIAG/SUBDIAG,IB""" 2863 # Set set_amp2_line 2864 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id 2865 else: 2866 # Standard running 2867 # Set set_amp2_line 2868 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)" 2869 2870 # If group_numer 2871 replace_dict['ml_prefix'] = \ 2872 self.get_ME_identifier(matrix_element, subproc_number, proc_id) 2873 2874 # Extract ncolor 2875 ncolor = max(1, len(matrix_element.get('color_basis'))) 2876 replace_dict['ncolor'] = ncolor 2877 2878 n_tot_diags = len(matrix_element.get_loop_diagrams()) 2879 replace_dict['n_tot_diags'] = n_tot_diags 2880 2881 file = open(pjoin(_file_path, \ 2882 'iolibs/template_files/%s' % self.matrix_file)).read() 2883 file = file % replace_dict 2884 2885 # Write the file 2886 writer.writelines(file) 2887 2888 return 0, ncolor
2889
2890 - def get_amp2_lines(self, *args, **opts):
2891          """Make sure the function is implemented in the daughters"""
2892 
2893          raise NotImplementedError, 'The function get_amp2_lines must be '+\
2894                          'implemented in the daughters of LoopInducedExporterME'
2895 2896 #=============================================================================== 2897 # LoopInducedExporterMEGroup 2898 #===============================================================================
2899 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 2900 export_v4.ProcessExporterFortranMEGroup):
2901 """Class to take care of exporting a set of grouped loop induced matrix 2902 elements""" 2903 2904 matrix_file = "matrix_loop_induced_madevent_group.inc" 2905 2911
2912 - def write_source_makefile(self, *args, **opts):
2913 """Pick the correct write_source_makefile function from 2914 ProcessExporterFortranMEGroup""" 2915 2916 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 2917 *args, **opts)
2918
2919 - def copy_v4template(self, *args, **opts):
2920          """Pick the right mother functions
2921          """
2922          # Call specifically the necessary building functions for the mixed
2923          # template setup for both MadEvent and MadLoop standalone
2924 
2925          # Start with the MadEvent one
2926          export_v4.ProcessExporterFortranMEGroup.copy_v4template(self,*args,**opts)
2927 
2928          # Then the MadLoop-standalone related one
2929          LoopInducedExporterME.copy_v4template(self, *args, **opts)
2930
2931 - def finalize_v4_directory(self, *args, **opts):
2932 """Pick the right mother functions 2933 """ 2934 # Call specifically what finalize_v4_directory must be used, so that the 2935 # MRO doesn't interfere. 2936 2937 self.proc_characteristic['loop_induced'] = True 2938 2939 export_v4.ProcessExporterFortranMEGroup.finalize_v4_directory( 2940 self,*args,**opts) 2941 2942 # And the finilize_v4 from LoopInducedExporterME which essentially takes 2943 # care of MadLoop virtuals initialization 2944 LoopInducedExporterME.finalize_v4_directory(self,*args,**opts)
2945
2946 - def generate_subprocess_directory_v4(self, subproc_group, 2947 fortran_model,group_number):
2948 """Generate the Pn directory for a subprocess group in MadEvent, 2949 including the necessary matrix_N.f files, configs.inc and various 2950 other helper files""" 2951 2952 # Generate the MadLoop files 2953 calls = 0 2954 matrix_elements = subproc_group.get('matrix_elements') 2955 for ime, matrix_element in enumerate(matrix_elements): 2956 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 2957 group_number = group_number, proc_id = str(ime+1), 2958 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 2959 config_map = subproc_group.get('diagram_maps')[ime]) 2960 2961 # Then generate the MadEvent files 2962 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory_v4( 2963 self, subproc_group,fortran_model,group_number) 2964 2965 return calls
2966
2967 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
2968 """Return the various replacement dictionary inputs necessary for the 2969 multichanneling amp2 definition for the loop-induced MadEvent output. 2970 """ 2971 2972 if not config_map: 2973 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 2974 ' necessary for the MadEvent Loop-induced output with grouping.' 2975 2976 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 2977 2978 ret_lines = [] 2979 # In this case, we need to sum up all amplitudes that have 2980 # identical topologies, as given by the config_map (which 2981 # gives the topology/config for each of the diagrams 2982 diagrams = matrix_element.get('diagrams') 2983 2984 # Note that we need to use AMP2 number corresponding to the first 2985 # diagram number used for that AMP2. 2986 # The dictionary below maps the config ID to this corresponding first 2987 # diagram number 2988 config_index_map = {} 2989 # For each diagram number, the dictionary below gives the config_id it 2990 # belongs to or 0 if it doesn't belong to any. 2991 loop_amp_ID_to_config = {} 2992 2993 # Combine the diagrams with identical topologies 2994 config_to_diag_dict = {} 2995 for idiag, diag in enumerate(matrix_element.get('diagrams')): 2996 try: 2997 config_to_diag_dict[config_map[idiag]].append(idiag) 2998 except KeyError: 2999 config_to_diag_dict[config_map[idiag]] = [idiag] 3000 3001 for config in sorted(config_to_diag_dict.keys()): 3002 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3003 3004 # First add the UV and R2 counterterm amplitudes of each selected 3005 # diagram for the multichannel config 3006 CT_amp_numbers = [a.get('number') for a in \ 3007 sum([diagrams[idiag].get_ct_amplitudes() for \ 3008 idiag in config_to_diag_dict[config]], [])] 3009 3010 for CT_amp_number in CT_amp_numbers: 3011 loop_amp_ID_to_config[CT_amp_number] = config 3012 3013 # Now add here the loop amplitudes. 3014 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3015 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3016 idiag in config_to_diag_dict[config]], [])] 3017 3018 for loop_amp_number in loop_amp_numbers: 3019 loop_amp_ID_to_config[loop_amp_number] = config 3020 3021 # Notice that the config_id's are not necessarily sequential here, so 3022 # the size of the config_index_map array has to be the maximum over all 3023 # config_ids. 3024 # config_index_map should never be empty unless there was no diagram, 3025 # so the expression below is ok. 3026 n_configs = max(config_index_map.keys()) 3027 replace_dict['nmultichannel_configs'] = n_configs 3028 3029 # We must fill the empty entries of the map with the dummy amplitude 3030 # number 0. 3031 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3032 for i in range(1,n_configs+1)] 3033 # Now the placeholder 'nmultichannels' refers to the number of 3034 # multi-channels which are contributing, so we must filter out zeros. 
3035 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3036 3037 # Now write the amp2 related inputs in the replacement dictionary 3038 res_list = [] 3039 chunk_size = 6 3040 for k in xrange(0, len(conf_list), chunk_size): 3041 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3042 (k + 1, min(k + chunk_size, len(conf_list)), 3043 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3044 3045 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3046 3047 res_list = [] 3048 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3049 amp_list = [loop_amp_ID_to_config[i] for i in \ 3050 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3051 chunk_size = 6 3052 for k in xrange(0, len(amp_list), chunk_size): 3053 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3054 (k + 1, min(k + chunk_size, len(amp_list)), 3055 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3056 3057 replace_dict['config_map_definition'] = '\n'.join(res_list) 3058 3059 return
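The construction above can be exercised in isolation. The following stand-alone sketch (with an invented config_map) shows how diagrams sharing a topology are grouped, how the first diagram number of each configuration becomes its AMP2 index, and how the resulting list is written out as Fortran DATA statements in chunks of six:

# Stand-alone sketch of the config_index_map construction and the chunked
# DATA output; the config_map below is invented for the example.
config_map = [1, 1, 2, 0, 2, 3]    # config id assigned to each diagram (0 = unused)

# Group the (0-based) diagram indices by the configuration they map to.
config_to_diag = {}
for idiag, conf in enumerate(config_map):
    config_to_diag.setdefault(conf, []).append(idiag)

# The AMP2 slot of a configuration is the first (1-based) diagram number in it.
config_index_map = dict((conf, diags[0] + 1)
                        for conf, diags in config_to_diag.items() if conf != 0)

n_configs = max(config_index_map)
conf_list = [config_index_map.get(i, 0) for i in range(1, n_configs + 1)]

# Emit Fortran DATA statements six entries at a time, as in the code above.
chunk_size = 6
lines = []
for k in range(0, len(conf_list), chunk_size):
    chunk = conf_list[k:k + chunk_size]
    lines.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" %
                 (k + 1, k + len(chunk), ','.join("%6r" % c for c in chunk)))
print('\n'.join(lines))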
3060 3061 #=============================================================================== 3062 # LoopInducedExporterMENoGroup 3063 #===============================================================================
3064 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3065 export_v4.ProcessExporterFortranME):
3066 """Class to take care of exporting a set of individual loop induced matrix 3067 elements""" 3068 3069 matrix_file = "matrix_loop_induced_madevent.inc" 3070 3076
3077 - def write_source_makefile(self, *args, **opts):
3078 """Pick the correct write_source_makefile function from 3079 ProcessExporterFortran""" 3080 3081 super(export_v4.ProcessExporterFortranME,self).\ 3082 write_source_makefile(*args, **opts)
3083
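The super(export_v4.ProcessExporterFortranME, self) call above deliberately skips one level of the method resolution order, so that the write_source_makefile defined above ProcessExporterFortranME is used rather than the ME-specific one. A toy example of the same pattern, with placeholder class names standing in for the real exporter hierarchy:

# Toy illustration of skipping one MRO level with an explicit super() call;
# the class names are placeholders, not the real exporter classes.
class Base(object):
    def write_source_makefile(self):
        return 'Base makefile'

class ME(Base):
    def write_source_makefile(self):
        return 'ME makefile'

class LoopInduced(object):
    pass

class LoopInducedNoGroup(LoopInduced, ME):
    def write_source_makefile(self):
        # super(ME, self) starts the lookup *after* ME in the instance's MRO,
        # so this dispatches to Base.write_source_makefile, not ME's.
        return super(ME, self).write_source_makefile()

print(LoopInducedNoGroup().write_source_makefile())    # prints 'Base makefile'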
3084 - def copy_v4template(self, *args, **opts):
3085 """Pick the right mother functions 3086 """ 3087 # Call specifically the necessary building functions for the mixed 3088 # template setup for both MadEvent and MadLoop standalone 3089 3090 # Start witht the MadEvent one 3091 export_v4.ProcessExporterFortranME.copy_v4template(self,*args,**opts) 3092 3093 # Then the MadLoop-standalone related one 3094 LoopInducedExporterME.copy_v4template(self, *args, **opts)
3095
3096 - def finalize_v4_directory(self, *args, **opts):
3097 """Pick the right mother functions 3098 """ 3099 3100 self.proc_characteristic['loop_induced'] = True 3101 # Call specifically what finalize_v4_directory must be used, so that the 3102 # MRO doesn't interfere. 3103 export_v4.ProcessExporterFortranME.finalize_v4_directory( 3104 self,*args,**opts) 3105 3106 # And the finilize_v4 from LoopInducedExporterME which essentially takes 3107 # care of MadLoop virtuals initialization 3108 LoopInducedExporterME.finalize_v4_directory(self,*args,**opts)
3109
3110 - def generate_subprocess_directory_v4(self, matrix_element, fortran_model, me_number):
3111 """Generate the Pn directory for a subprocess group in MadEvent, 3112 including the necessary matrix_N.f files, configs.inc and various 3113 other helper files""" 3114 3115 # Then generate the MadLoop files 3116 calls = self.generate_loop_subprocess(matrix_element,fortran_model, 3117 group_number = me_number) 3118 3119 3120 # First generate the MadEvent files 3121 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory_v4( 3122 self, matrix_element, fortran_model, me_number) 3123 return calls
3124
3125 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3126 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3127 3128 if config_map: 3129 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3130 ' for the Loop induced exporter without grouping.' 3131 3132 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3133 # Get minimum legs in a vertex 3134 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3135 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3136 minvert = min(vert_list) if vert_list!=[] else 0 3137 3138 # Note that we need to use AMP2 number corresponding to the first 3139 # diagram number used for that AMP2. 3140 # The dictionary below maps the config ID to this corresponding first 3141 # diagram number 3142 config_index_map = {} 3143 # For each diagram number, the dictionary below gives the config_id it 3144 # belongs to or 0 if it doesn't belong to any. 3145 loop_amp_ID_to_config = {} 3146 3147 n_configs = 0 3148 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3149 # Ignore any diagrams with 4-particle vertices. 3150 use_for_multichanneling = True 3151 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3152 use_for_multichanneling = False 3153 curr_config = 0 3154 else: 3155 n_configs += 1 3156 curr_config = n_configs 3157 3158 if not use_for_multichanneling: 3159 if 0 not in config_index_map: 3160 config_index_map[0] = idiag + 1 3161 else: 3162 config_index_map[curr_config] = idiag + 1 3163 3164 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3165 for CT_amp in CT_amps: 3166 loop_amp_ID_to_config[CT_amp] = curr_config 3167 3168 Loop_amps = [a.get('amplitudes')[0].get('number') 3169 for a in diag.get_loop_amplitudes()] 3170 for Loop_amp in Loop_amps: 3171 loop_amp_ID_to_config[Loop_amp] = curr_config 3172 3173 # Now write the amp2 related inputs in the replacement dictionary 3174 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3175 replace_dict['nmultichannel_configs'] = n_configs 3176 # Now the placeholder 'nmultichannels' refers to the number of 3177 # multi-channels which are contributing which, in the non-grouped case 3178 # is always equal to the total number of multi-channels. 3179 replace_dict['nmultichannels'] = n_configs 3180 3181 res_list = [] 3182 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3183 if i!=0] 3184 chunk_size = 6 3185 for k in xrange(0, len(conf_list), chunk_size): 3186 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3187 (k + 1, min(k + chunk_size, len(conf_list)), 3188 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3189 3190 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3191 3192 res_list = [] 3193 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3194 amp_list = [loop_amp_ID_to_config[i] for i in \ 3195 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3196 chunk_size = 6 3197 for k in xrange(0, len(amp_list), chunk_size): 3198 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3199 (k + 1, min(k + chunk_size, len(amp_list)), 3200 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3201 3202 replace_dict['config_map_definition'] = '\n'.join(res_list)
3203
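In this non-grouped variant, configurations are simply numbered in diagram order, and any diagram whose largest vertex has more legs than the smallest maximum found among all diagrams (in practice, diagrams containing higher-point vertices) is excluded from multichanneling by mapping it to the dummy configuration 0. A small stand-alone sketch of that selection, with invented per-diagram vertex sizes:

# Illustrative sketch of the minvert-based channel selection used above;
# the per-diagram vertex sizes are invented for the example.
diagram_max_vertex = [3, 3, 4, 3]      # largest vertex size (legs) of each diagram

minvert = min(diagram_max_vertex)      # smallest of the per-diagram maxima

config_index_map = {}                  # config id -> first (1-based) diagram number
diag_to_config = []                    # config id chosen for each diagram (0 = none)
n_configs = 0
for idiag, vmax in enumerate(diagram_max_vertex):
    if vmax > minvert:
        # Not used for multichanneling: mapped to the dummy configuration 0.
        diag_to_config.append(0)
    else:
        n_configs += 1
        config_index_map[n_configs] = idiag + 1
        diag_to_config.append(n_configs)

print(sorted(config_index_map.items()))    # [(1, 1), (2, 2), (3, 4)]
print(diag_to_config)                      # [1, 2, 0, 3]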