
Source Code for Module madgraph.loop.loop_exporters

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Methods and classes to export matrix elements to v4 format.""" 
  16   
  17  import copy 
  18  import fractions 
  19  import glob 
  20  import logging 
  21  import os 
  22  import stat 
  23  import sys 
  24  import re 
  25  import shutil 
  26  import subprocess 
  27  import itertools 
  28  import time 
  29  import datetime 
  30   
  31   
  32  import aloha 
  33   
  34  import madgraph.core.base_objects as base_objects 
  35  import madgraph.core.color_algebra as color 
  36  import madgraph.core.helas_objects as helas_objects 
  37  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  38  import madgraph.iolibs.drawing_eps as draw 
  39  import madgraph.iolibs.files as files 
  40  import madgraph.iolibs.group_subprocs as group_subprocs 
  41  import madgraph.various.misc as misc 
  42  import madgraph.various.q_polynomial as q_polynomial 
  43  import madgraph.iolibs.file_writers as writers 
  44  import madgraph.iolibs.gen_infohtml as gen_infohtml 
  45  import madgraph.iolibs.template_files as template_files 
  46  import madgraph.iolibs.ufo_expression_parsers as parsers 
  47  import madgraph.iolibs.export_v4 as export_v4 
  48  import madgraph.various.diagram_symmetry as diagram_symmetry 
  49  import madgraph.various.process_checks as process_checks 
  50  import madgraph.various.progressbar as pbar 
  51  import madgraph.various.q_polynomial as q_polynomial 
  52  import madgraph.core.color_amp as color_amp 
  53  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  54  import models.check_param_card as check_param_card 
  55  from madgraph.loop.loop_base_objects import LoopDiagram 
  56  from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles 
  57   
  58  import madgraph.various.banner as banner_mod 
  59   
  60  pjoin = os.path.join 
  61   
  62  import aloha.create_aloha as create_aloha 
  63  import models.write_param_card as param_writer 
  64  from madgraph import MadGraph5Error, MG5DIR, InvalidCmd 
  65  from madgraph.iolibs.files import cp, ln, mv 
  66  pjoin = os.path.join 
  67  _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/' 
  68  logger = logging.getLogger('madgraph.loop_exporter') 
  69   
  70  #=============================================================================== 
  71  # LoopExporterFortran 
  72  #=============================================================================== 
73 -class LoopExporterFortran(object):
  74      """ Class to define general helper functions for the different 
  75      loop fortran exporters (ME, SA, MEGroup, etc.) which will inherit both 
  76      from this class AND from the corresponding ProcessExporterFortran(ME,SA,...). 
  77      It plays the same role as ProcessExporterFortran and simply defines here 
  78      the loop-specific helper functions necessary for all loop exporters. 
  79      Notice that we do not have LoopExporterFortran inheriting from 
  80      ProcessExporterFortran but give access to arguments like dir_path and 
  81      clean using options. This avoids method resolution order ambiguity.""" 
  82   
  83      default_opt = {'clean': False, 'complex_mass': False, 
  84                     'export_format': 'madloop', 'mp': True, 
  85                     'loop_dir': '', 'cuttools_dir': '', 
  86                     'fortran_compiler': 'gfortran', 
  87                     'SubProc_prefix': 'P', 
  88                     'output_dependencies': 'external', 
  89                     'compute_color_flows': False, 
  90                     'mode': ''} 
  91   
  92      include_names = {'ninja'  : 'mninja.mod', 
  93                       'golem'  : 'generic_function_1p.mod', 
  94                       'samurai': 'msamurai.mod', 
  95                       'collier': 'collier.mod'} 
  96   
97 - def __init__(self, dir_path = "", opt=None):
  98          """Initialize the LoopExporterFortran with directory information on where 
  99          to find all the loop-related source files, like CutTools.""" 
 100   
 101   
 102          self.opt = dict(self.default_opt) 
 103          if opt: 
 104              self.opt.update(opt) 
 105   
 106          self.SubProc_prefix = self.opt['SubProc_prefix'] 
 107          self.loop_dir = self.opt['loop_dir'] 
 108          self.cuttools_dir = self.opt['cuttools_dir'] 
 109          self.fortran_compiler = self.opt['fortran_compiler'] 
 110          self.dependencies = self.opt['output_dependencies'] 
 111          self.compute_color_flows = self.opt['compute_color_flows'] 
 112   
 113          super(LoopExporterFortran, self).__init__(dir_path, self.opt)
114 115 189
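As a minimal usage sketch (not part of the module): the constructor above merges user options into default_opt and forwards them, via super(), to the sibling ProcessExporterFortran in the MRO. The directory names and option values below are hypothetical, and the (dir_path, opt) call signature is assumed from that super() call rather than taken from a real run.

import madgraph.loop.loop_exporters as loop_exporters

opts = dict(loop_exporters.LoopExporterFortran.default_opt)
opts.update({'loop_dir': 'Template/loop_material',   # hypothetical paths
             'cuttools_dir': 'vendor/CutTools',
             'output_dependencies': 'external'})

# LoopProcessExporterFortranSA (defined below) inherits from both
# LoopExporterFortran and export_v4.ProcessExporterFortranSA, so a single
# opt dictionary configures both sides of the cooperative inheritance.
exporter = loop_exporters.LoopProcessExporterFortranSA('PROC_dir', opts)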
190 - def get_aloha_model(self, model):
 191          """ Caches the aloha model created here as an attribute of the loop 
 192          exporter so that it can later be used in the LoopHelasMatrixElement 
 193          in the function compute_all_analytic_information for recycling aloha 
 194          computations across different LoopHelasMatrixElements steered by the 
 195          same loop exporter. 
 196          """ 
 197          if not hasattr(self, 'aloha_model'): 
 198              self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath'))) 
 199          return self.aloha_model
200 201 #=========================================================================== 202 # write the multiple-precision header files 203 #===========================================================================
204 - def write_mp_files(self, writer_mprec, writer_mpc):
205 """Write the cts_mprec.h and cts_mpc.h""" 206 207 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read() 208 writer_mprec.writelines(file) 209 210 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read() 211 file = file.replace('&','') 212 writer_mpc.writelines(file) 213 214 return True
215 216 #=============================================================================== 217 # LoopProcessExporterFortranSA 218 #===============================================================================
219 -class LoopProcessExporterFortranSA(LoopExporterFortran, 220 export_v4.ProcessExporterFortranSA):
221 222 """Class to take care of exporting a set of loop matrix elements in the 223 Fortran format.""" 224 225 template_dir=os.path.join(_file_path,'iolibs/template_files/loop') 226 madloop_makefile_name = 'makefile' 227 228 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner( 229 style='classic2', color='green', 230 top_frame_char = '=', bottom_frame_char = '=', 231 left_frame_char = '{',right_frame_char = '}', 232 print_frame=True, side_margin = 7, up_margin = 1) 233
234 - def __init__(self, *args, **opts):
235 super(LoopProcessExporterFortranSA,self).__init__(*args,**opts) 236 self.unique_id=0 # to allow collier to distinguish the various loop subprocesses
237
238 - def copy_template(self, model):
239 """Additional actions needed to setup the Template. 240 """ 241 super(LoopProcessExporterFortranSA, self).copy_template(model) 242 243 self.loop_additional_template_setup()
244
245 - def loop_additional_template_setup(self, copy_Source_makefile = True):
246 """ Perform additional actions specific for this class when setting 247 up the template with the copy_template function.""" 248 249 # We must change some files to their version for NLO computations 250 cpfiles= ["Cards/MadLoopParams.dat", 251 "SubProcesses/MadLoopParamReader.f", 252 "SubProcesses/MadLoopParams.inc"] 253 if copy_Source_makefile: 254 cpfiles.append("Source/makefile") 255 256 for file in cpfiles: 257 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 258 os.path.join(self.dir_path, file)) 259 260 # Also put a copy of MadLoopParams.dat into MadLoopParams_default.dat 261 shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), 262 pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat')) 263 264 self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.loop_dir,'StandAlone', 265 'Cards', 'MadLoopParams.dat')) 266 # write the output file 267 self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses", 268 "MadLoopParams.dat")) 269 270 # We might need to give a different name to the MadLoop makefile\ 271 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'), 272 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name)) 273 274 # Write SubProcesses/MadLoop_makefile_definitions with dummy variables 275 # for the non-optimized output 276 link_tir_libs=[] 277 tir_libs=[] 278 279 filePath = pjoin(self.dir_path, 'SubProcesses', 280 'MadLoop_makefile_definitions') 281 calls = self.write_loop_makefile_definitions( 282 writers.MakefileWriter(filePath),link_tir_libs,tir_libs) 283 284 # We need minimal editing of MadLoopCommons.f 285 # For the optimized output, this file will be overwritten once the 286 # availability of COLLIER has been determined. 287 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 288 "SubProcesses","MadLoopCommons.inc")).read() 289 writer = writers.FortranWriter(os.path.join(self.dir_path, 290 "SubProcesses","MadLoopCommons.f")) 291 writer.writelines(MadLoopCommon%{ 292 'print_banner_commands':self.MadLoop_banner}, context={ 293 'collier_available':False}) 294 writer.close() 295 296 # Copy the whole MadLoop5_resources directory (empty at this stage) 297 if not os.path.exists(pjoin(self.dir_path,'SubProcesses', 298 'MadLoop5_resources')): 299 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses', 300 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses')) 301 302 # Link relevant cards from Cards inside the MadLoop5_resources 303 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'), 304 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 305 ln(pjoin(self.dir_path,'Cards','param_card.dat'), 306 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 307 ln(pjoin(self.dir_path,'Cards','ident_card.dat'), 308 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources')) 309 310 # And remove check_sa in the SubProcess folder since now there is a 311 # check_sa tailored to each subprocess. 
312 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')): 313 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f')) 314 315 cwd = os.getcwd() 316 dirpath = os.path.join(self.dir_path, 'SubProcesses') 317 try: 318 os.chdir(dirpath) 319 except os.error: 320 logger.error('Could not cd to directory %s' % dirpath) 321 return 0 322 323 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 324 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 325 writers.FortranWriter('cts_mpc.h')) 326 327 # Return to original PWD 328 os.chdir(cwd) 329 330 # We must link the CutTools to the Library folder of the active Template 331 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
 332   
 333      # This function is placed here and not in the optimized exporter, 
 334      # because the same makefile.inc should be used in all cases. 
335 - def write_loop_makefile_definitions(self, writer, link_tir_libs, 336 tir_libs,tir_include=[]):
337 """ Create the file makefile which links to the TIR libraries.""" 338 339 file = open(os.path.join(self.loop_dir,'StandAlone', 340 'SubProcesses','MadLoop_makefile_definitions.inc')).read() 341 replace_dict={} 342 replace_dict['link_tir_libs']=' '.join(link_tir_libs) 343 replace_dict['tir_libs']=' '.join(tir_libs) 344 replace_dict['dotf']='%.f' 345 replace_dict['prefix']= self.SubProc_prefix 346 replace_dict['doto']='%.o' 347 replace_dict['tir_include']=' '.join(tir_include) 348 file=file%replace_dict 349 if writer: 350 writer.writelines(file) 351 else: 352 return file
353
354 - def convert_model(self, model, wanted_lorentz = [], 355 wanted_couplings = []):
356 """ Caches the aloha model created here when writing out the aloha 357 fortran subroutine. 358 """ 359 self.get_aloha_model(model) 360 super(LoopProcessExporterFortranSA, self).convert_model(model, 361 wanted_lorentz = wanted_lorentz, wanted_couplings = wanted_couplings)
362
363 - def get_ME_identifier(self, matrix_element, 364 group_number = None, group_elem_number = None):
 365          """ A function returning a string uniquely identifying the matrix 
 366          element given in argument, so that it can be used as a prefix to all 
 367          MadLoop5 subroutines and common blocks related to it. This makes it 
 368          possible to compile several processes into one library, as requested 
 369          by the BLHA (Binoth Les Houches Accord) guidelines. 
 370          The arguments group_number and group_elem_number are only used for the 
 371          LoopInduced output with MadEvent.""" 
 372   
 373          # When disabling the loop grouping in the LoopInduced MadEvent output, 
 374          # only the group_number is set and group_elem_number is None. In this 
 375          # case we don't print the group element number. 
 376          if (not group_number is None) and group_elem_number is None: 
 377              return 'ML5_%d_%s_' % (matrix_element.get('processes')[0].get('id'), 
 378                                     group_number) 
 379          elif group_number is None or group_elem_number is None: 
 380              return 'ML5_%d_' % matrix_element.get('processes')[0].get('id') 
 381          else: 
 382              return 'ML5_%d_%s_%s_' % (matrix_element.get('processes')[0].get('id'), 
 383                                        group_number, group_elem_number)
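A stand-alone rendition of the prefix rule above, using a plain process id instead of a real matrix element (illustrative sketch only):

def ml5_prefix(proc_id, group_number=None, group_elem_number=None):
    # Mirrors the three branches of get_ME_identifier above.
    if group_number is not None and group_elem_number is None:
        return 'ML5_%d_%s_' % (proc_id, group_number)
    elif group_number is None or group_elem_number is None:
        return 'ML5_%d_' % proc_id
    else:
        return 'ML5_%d_%s_%s_' % (proc_id, group_number, group_elem_number)

assert ml5_prefix(0) == 'ML5_0_'
assert ml5_prefix(0, 2) == 'ML5_0_2_'
assert ml5_prefix(0, 2, 1) == 'ML5_0_2_1_'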
384
385 - def get_SubProc_folder_name(self, process, 386 group_number = None, group_elem_number = None):
 387          """Returns the name of the SubProcess directory, which can contain 
 388          the process group and group element number for the case of loop-induced 
 389          integration with MadEvent.""" 
 390   
 391          # When disabling the loop grouping in the LoopInduced MadEvent output, 
 392          # only the group_number is set and group_elem_number is None. In this 
 393          # case we don't print the group element number. 
 394          if not group_number is None and group_elem_number is None: 
 395              return "%s%d_%s_%s" % (self.SubProc_prefix, process.get('id'), 
 396                              group_number, process.shell_string(print_id=False)) 
 397          elif group_number is None or group_elem_number is None: 
 398              return "%s%s" % (self.SubProc_prefix, process.shell_string()) 
 399          else: 
 400              return "%s%d_%s_%s_%s" % (self.SubProc_prefix, process.get('id'), 
 401                 group_number, group_elem_number, process.shell_string(print_id=False))
402 403 #=========================================================================== 404 # Set the compiler to be gfortran for the loop processes. 405 #===========================================================================
406 - def compiler_choice(self, compiler=export_v4.default_compiler):
 407          """ Different daughter classes might want different compilers. 
 408          Here, the gfortran compiler is used throughout the compilation 
 409          (mandatory for CutTools, which is written in f90).""" 
 410          if isinstance(compiler, str): 
 411              fortran_compiler = compiler 
 412              compiler = export_v4.default_compiler 
 413              compiler['fortran'] = fortran_compiler 
 414   
 415          if not compiler['fortran'] is None and not \ 
 416                 any([name in compiler['fortran'] for name in \ 
 417                                                      ['gfortran','ifort']]): 
 418              logger.info('For loop processes, the compiler must be fortran90 '+\ 
 419                          'compatible, like gfortran.') 
 420              compiler['fortran'] = 'gfortran' 
 421              self.set_compiler(compiler, True) 
 422          else: 
 423              self.set_compiler(compiler) 
 424   
 425          self.set_cpp_compiler(compiler['cpp'])
426
427 - def turn_to_mp_calls(self, helas_calls_list):
 428          # Prepend 'MP_' to all the helas calls in helas_calls_list. 
 429          # Might look like a brutal, unsafe implementation, but it is not, as 
 430          # these calls are built from the properties of the HELAS objects and 
 431          # whether they are evaluated in double or quad precision is none of 
 432          # their business but only relevant to the output algorithm. 
 433          # Also the cast to complex masses DCMPLX(*) must be replaced by 
 434          # CMPLX(*,KIND=16). 
 435          MP = re.compile(r"(?P<toSub>^.*CALL\s+)", re.IGNORECASE | re.MULTILINE) 
 436   
 437          def replaceWith(match_obj): 
 438              return match_obj.group('toSub')+'MP_' 
 439   
 440          DCMPLX = re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\ 
 441                              re.IGNORECASE | re.MULTILINE) 
 442   
 443          for i, helas_call in enumerate(helas_calls_list): 
 444              new_helas_call = MP.sub(replaceWith, helas_call) 
 445              helas_calls_list[i] = DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\ 
 446                                               new_helas_call)
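The effect of the two substitutions above, shown on two made-up call strings (the routine names are examples, not actual generated output):

import re

MP = re.compile(r"(?P<toSub>^.*CALL\s+)", re.IGNORECASE | re.MULTILINE)
DCMPLX = re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)", re.IGNORECASE | re.MULTILINE)

calls = ["CALL FFV1_0(W(1,1),W(1,2),W(1,3),GC_5,AMP(1))",
         "CALL VXXXXX(P(0,1),DCMPLX(MT),NHEL(1),-1*IC(1),W(1,1))"]
for i, c in enumerate(calls):
    calls[i] = DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",
                          MP.sub(lambda m: m.group('toSub') + 'MP_', c))
# calls[0] -> 'CALL MP_FFV1_0(W(1,1),W(1,2),W(1,3),GC_5,AMP(1))'
# calls[1] -> 'CALL MP_VXXXXX(P(0,1),CMPLX(MT,KIND=16),NHEL(1),-1*IC(1),W(1,1))'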
447 451 459
460 - def make(self):
461 """ Compiles the additional dependences for loop (such as CutTools).""" 462 super(LoopProcessExporterFortranSA, self).make() 463 464 # make CutTools (only necessary with MG option output_dependencies='internal') 465 libdir = os.path.join(self.dir_path,'lib') 466 sourcedir = os.path.join(self.dir_path,'Source') 467 if self.dependencies=='internal': 468 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 469 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 470 if os.path.exists(pjoin(sourcedir,'CutTools')): 471 logger.info('Compiling CutTools (can take a couple of minutes) ...') 472 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 473 logger.info(' ...done.') 474 else: 475 raise MadGraph5Error('Could not compile CutTools because its'+\ 476 ' source directory could not be found in the SOURCE folder.') 477 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 478 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 479 raise MadGraph5Error('CutTools compilation failed.') 480 481 # Verify compatibility between current compiler and the one which was 482 # used when last compiling CutTools (if specified). 483 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 484 libdir, 'libcts.a')))),'compiler_version.log') 485 if os.path.exists(compiler_log_path): 486 compiler_version_used = open(compiler_log_path,'r').read() 487 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 488 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 489 if os.path.exists(pjoin(sourcedir,'CutTools')): 490 logger.info('CutTools was compiled with a different fortran'+\ 491 ' compiler. Re-compiling it now...') 492 misc.compile(['cleanCT'], cwd = sourcedir) 493 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 494 logger.info(' ...done.') 495 else: 496 raise MadGraph5Error("CutTools installation in %s"\ 497 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 498 " seems to have been compiled with a different compiler than"+\ 499 " the one specified in MG5_aMC. Please recompile CutTools.")
500
501 - def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
 502          """Concatenate the coefficient information to reduce it to 
 503          (fraction, is_imaginary).""" 
 504   
 505          total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power 
 506   
 507          return (total_coeff, is_imaginary)
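A worked instance of the reduction above, with arbitrary values chosen only to show the arithmetic:

import fractions
# ff_number=1, frac=1/3, Nc_power=2, Nc_value=3  ->  1 * 1/3 * 3**2 = 3
total = 1 * fractions.Fraction(1, 3) * fractions.Fraction(3) ** 2
assert total == fractions.Fraction(3, 1)
# so cat_coeff(1, Fraction(1, 3), False, 2) returns (Fraction(3, 1), False)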
508
509 - def get_amp_to_jamp_map(self, col_amps, n_amps):
 510          """ Returns a list whose element 'i' is a list of tuples corresponding 
 511          to all occurrences of amplitude number 'i' in the jamp number 'j' 
 512          with coefficient 'coeff_j'. The format of each tuple describing an 
 513          occurrence is (j, coeff_j), where coeff_j is of the form (Fraction, is_imag).""" 
 514   
 515          if(isinstance(col_amps,list)): 
 516              if(col_amps and isinstance(col_amps[0],list)): 
 517                  color_amplitudes=col_amps 
 518              else: 
 519                  raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 
 520          else: 
 521              raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map" 
 522   
 523          # To store the result 
 524          res_list = [[] for i in range(n_amps)] 
 525          for i, coeff_list in enumerate(color_amplitudes): 
 526              for (coefficient, amp_number) in coeff_list: 
 527                  res_list[amp_number-1].append((i,self.cat_coeff(\ 
 528                    coefficient[0],coefficient[1],coefficient[2],coefficient[3]))) 
 529   
 530          return res_list
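A small worked example of this inversion with made-up color data: two color flows (jamps) referencing two amplitudes, each coefficient given as (ff_number, frac, is_imaginary, Nc_power):

from fractions import Fraction
col_amps = [[((1, Fraction(1, 3), False, 1), 1)],                    # jamp 0
            [((1, Fraction(1, 2), False, 0), 1),                     # jamp 1
             ((-1, Fraction(1), True, 0), 2)]]
# get_amp_to_jamp_map(col_amps, n_amps=2) would return
#   [[(0, (Fraction(1, 1), False)), (1, (Fraction(1, 2), False))],   # amp 1
#    [(1, (Fraction(-1, 1), True))]]                                 # amp 2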
531
532 - def get_color_matrix(self, matrix_element):
533 """Return the color matrix definition lines. This color matrix is of size 534 NLOOPAMPSxNBORNAMPS and allows for squaring individually each Loop and Born 535 amplitude.""" 536 537 logger.info('Computing diagram color coefficients') 538 539 # The two lists have a list of tuples at element 'i' which correspond 540 # to all apparitions of loop amplitude number 'i' in the jampl number 'j' 541 # with coeff 'coeffj'. The format of each tuple describing an apparition 542 # is (j, coeffj). 543 ampl_to_jampl=self.get_amp_to_jamp_map(\ 544 matrix_element.get_loop_color_amplitudes(), 545 matrix_element.get_number_of_loop_amplitudes()) 546 if matrix_element.get('processes')[0].get('has_born'): 547 ampb_to_jampb=self.get_amp_to_jamp_map(\ 548 matrix_element.get_born_color_amplitudes(), 549 matrix_element.get_number_of_born_amplitudes()) 550 else: 551 ampb_to_jampb=ampl_to_jampl 552 # Below is the original color matrix multiplying the JAMPS 553 if matrix_element.get('color_matrix'): 554 ColorMatrixDenom = \ 555 matrix_element.get('color_matrix').get_line_denominators() 556 ColorMatrixNum = [ matrix_element.get('color_matrix').\ 557 get_line_numerators(index, denominator) for 558 (index, denominator) in enumerate(ColorMatrixDenom) ] 559 else: 560 ColorMatrixDenom= [1] 561 ColorMatrixNum = [[1]] 562 563 # Below is the final color matrix output 564 ColorMatrixNumOutput=[] 565 ColorMatrixDenomOutput=[] 566 567 # Now we construct the color factors between each born and loop amplitude 568 # by scanning their contributions to the different jamps. 569 start = time.time() 570 progress_bar = None 571 time_info = False 572 for i, jampl_list in enumerate(ampl_to_jampl): 573 # This can be pretty long for processes with many color flows. 574 # So, if necessary (i.e. for more than 15s), we tell the user the 575 # estimated time for the processing. 576 if i==5: 577 elapsed_time = time.time()-start 578 t = len(ampl_to_jampl)*(elapsed_time/5.0) 579 if t > 10.0: 580 time_info = True 581 logger.info('The color factors computation will take '+\ 582 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\ 583 'Started on %s.'%datetime.datetime.now().strftime(\ 584 "%d-%m-%Y %H:%M")) 585 if logger.getEffectiveLevel()<logging.WARNING: 586 widgets = ['Color computation:', pbar.Percentage(), ' ', 587 pbar.Bar(),' ', pbar.ETA(), ' '] 588 progress_bar = pbar.ProgressBar(widgets=widgets, 589 maxval=len(ampl_to_jampl), fd=sys.stdout) 590 591 if not progress_bar is None: 592 progress_bar.update(i+1) 593 # Flush to force the printout of the progress_bar to be updated 594 sys.stdout.flush() 595 596 line_num=[] 597 line_denom=[] 598 599 # Treat the special case where this specific amplitude contributes to no 600 # color flow at all. So it is zero because of color but not even due to 601 # an accidental cancellation among color flows, but simply because of its 602 # projection to each individual color flow is zero. In such case, the 603 # corresponding jampl_list is empty and all color coefficients must then 604 # be zero. This happens for example in the Higgs Effective Theory model 605 # for the bubble made of a 4-gluon vertex and the effective ggH vertex. 
606 if len(jampl_list)==0: 607 line_num=[0]*len(ampb_to_jampb) 608 line_denom=[1]*len(ampb_to_jampb) 609 ColorMatrixNumOutput.append(line_num) 610 ColorMatrixDenomOutput.append(line_denom) 611 continue 612 613 for jampb_list in ampb_to_jampb: 614 real_num=0 615 imag_num=0 616 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]* 617 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for 618 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in 619 itertools.product(jampl_list,jampb_list)]) 620 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \ 621 itertools.product(jampl_list,jampb_list): 622 # take the numerator and multiply by lcm/denominator 623 # as we will later divide by the lcm. 624 buff_num=ampl_coeff[0].numerator*\ 625 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\ 626 abs(common_denom)/(ampl_coeff[0].denominator*\ 627 ampb_coeff[0].denominator*ColorMatrixDenom[jampl]) 628 # Remember that we must take the complex conjugate of 629 # the born jamp color coefficient because we will compute 630 # the square with 2 Re(LoopAmp x BornAmp*) 631 if ampl_coeff[1] and ampb_coeff[1]: 632 real_num=real_num+buff_num 633 elif not ampl_coeff[1] and not ampb_coeff[1]: 634 real_num=real_num+buff_num 635 elif not ampl_coeff[1] and ampb_coeff[1]: 636 imag_num=imag_num-buff_num 637 else: 638 imag_num=imag_num+buff_num 639 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\ 640 "color matrix element which has both a real and imaginary part." 641 if imag_num!=0: 642 res=fractions.Fraction(imag_num,common_denom) 643 line_num.append(res.numerator) 644 # Negative denominator means imaginary color coef of the 645 # final color matrix 646 line_denom.append(res.denominator*-1) 647 else: 648 res=fractions.Fraction(real_num,common_denom) 649 line_num.append(res.numerator) 650 # Positive denominator means real color coef of the final color matrix 651 line_denom.append(res.denominator) 652 653 ColorMatrixNumOutput.append(line_num) 654 ColorMatrixDenomOutput.append(line_denom) 655 656 if time_info: 657 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\ 658 "%d-%m-%Y %H:%M")) 659 if progress_bar!=None: 660 progress_bar.finish() 661 662 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
663
664 - def get_context(self,matrix_element):
665 """ Returns the contextual variables which need to be set when 666 pre-processing the template files.""" 667 668 # The nSquaredSO entry of the general replace dictionary should have 669 # been set in write_loopmatrix prior to the first call to this function 670 # However, for cases where the TIRCaching contextual variable is 671 # irrelevant (like in the default output), this might not be the case 672 # so we set it to 1. 673 try: 674 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO'] 675 except (KeyError, AttributeError): 676 n_squared_split_orders = 1 677 678 LoopInduced = not matrix_element.get('processes')[0].get('has_born') 679 680 # Force the computation of loop color flows for loop_induced processes 681 ComputeColorFlows = self.compute_color_flows or LoopInduced 682 # The variable AmplitudeReduction is just to make the contextual 683 # conditions more readable in the include files. 684 AmplitudeReduction = LoopInduced or ComputeColorFlows 685 # Even when not reducing at the amplitude level, the TIR caching 686 # is useful when there is more than one squared split order config. 687 TIRCaching = AmplitudeReduction or n_squared_split_orders>1 688 MadEventOutput = False 689 690 return {'LoopInduced': LoopInduced, 691 'ComputeColorFlows': ComputeColorFlows, 692 'AmplitudeReduction': AmplitudeReduction, 693 'TIRCaching': TIRCaching, 694 'MadEventOutput': MadEventOutput}
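As a worked evaluation of these flags, consider a hypothetical loop-induced process (no born contribution), with compute_color_flows left at False and a single squared split-order combination:

LoopInduced = True                                       # no born
ComputeColorFlows = False or LoopInduced                 # forced to True
AmplitudeReduction = LoopInduced or ComputeColorFlows    # True
TIRCaching = AmplitudeReduction or 1 > 1                 # True
# -> {'LoopInduced': True, 'ComputeColorFlows': True,
#     'AmplitudeReduction': True, 'TIRCaching': True, 'MadEventOutput': False}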
695 696 #=========================================================================== 697 # generate_subprocess_directory 698 #===========================================================================
699 - def generate_loop_subprocess(self, matrix_element, fortran_model, 700 group_number = None, proc_id = None, config_map=None, unique_id=None):
701 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone, 702 including the necessary loop_matrix.f, born_matrix.f and include files. 703 Notice that this is too different from generate_subprocess_directory 704 so that there is no point reusing this mother function. 705 The 'group_number' and 'proc_id' options are only used for the LoopInduced 706 MadEvent output and only to specify the ME_identifier and the P* 707 SubProcess directory name.""" 708 709 cwd = os.getcwd() 710 proc_dir_name = self.get_SubProc_folder_name( 711 matrix_element.get('processes')[0],group_number,proc_id) 712 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name) 713 714 try: 715 os.mkdir(dirpath) 716 except os.error as error: 717 logger.warning(error.strerror + " " + dirpath) 718 719 try: 720 os.chdir(dirpath) 721 except os.error: 722 logger.error('Could not cd to directory %s' % dirpath) 723 return 0 724 725 logger.info('Creating files in directory %s' % dirpath) 726 727 if unique_id is None: 728 raise MadGraph5Error, 'A unique id must be provided to the function'+\ 729 'generate_loop_subprocess of LoopProcessExporterFortranSA.' 730 # Create an include with the unique consecutive ID assigned 731 open('unique_id.inc','w').write( 732 """ integer UNIQUE_ID 733 parameter(UNIQUE_ID=%d)"""%unique_id) 734 735 # Extract number of external particles 736 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 737 738 calls=self.write_loop_matrix_element_v4(None,matrix_element, 739 fortran_model, group_number = group_number, 740 proc_id = proc_id, config_map = config_map) 741 742 # We assume here that all processes must share the same property of 743 # having a born or not, which must be true anyway since these are two 744 # definite different classes of processes which can never be treated on 745 # the same footing. 746 if matrix_element.get('processes')[0].get('has_born'): 747 filename = 'born_matrix.f' 748 calls = self.write_bornmatrix( 749 writers.FortranWriter(filename), 750 matrix_element, 751 fortran_model) 752 753 filename = 'pmass.inc' 754 self.write_pmass_file(writers.FortranWriter(filename), 755 matrix_element) 756 757 filename = 'ngraphs.inc' 758 self.write_ngraphs_file(writers.FortranWriter(filename), 759 len(matrix_element.get_all_amplitudes())) 760 761 # Do not draw the loop diagrams if they are too many. 
762 # The user can always decide to do it manually, if really needed 763 loop_diags = [loop_diag for loop_diag in\ 764 matrix_element.get('base_amplitude').get('loop_diagrams')\ 765 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0] 766 if len(loop_diags)>5000: 767 logger.info("There are more than 5000 loop diagrams."+\ 768 "Only the first 5000 are drawn.") 769 filename = "loop_matrix.ps" 770 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 771 loop_diags[:5000]),filename, 772 model=matrix_element.get('processes')[0].get('model'),amplitude='') 773 logger.info("Drawing loop Feynman diagrams for " + \ 774 matrix_element.get('processes')[0].nice_string()) 775 plot.draw() 776 777 if matrix_element.get('processes')[0].get('has_born'): 778 filename = "born_matrix.ps" 779 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 780 get('born_diagrams'), 781 filename, 782 model=matrix_element.get('processes')[0].\ 783 get('model'), 784 amplitude='') 785 logger.info("Generating born Feynman diagrams for " + \ 786 matrix_element.get('processes')[0].nice_string(\ 787 print_weighted=False)) 788 plot.draw() 789 790 self.link_files_from_Subprocesses(self.get_SubProc_folder_name( 791 matrix_element.get('processes')[0],group_number,proc_id)) 792 793 # Return to original PWD 794 os.chdir(cwd) 795 796 if not calls: 797 calls = 0 798 return calls
799 820
821 - def generate_general_replace_dict(self,matrix_element, 822 group_number = None, proc_id = None):
823 """Generates the entries for the general replacement dictionary used 824 for the different output codes for this exporter.The arguments 825 group_number and proc_id are just for the LoopInduced output with MadEvent.""" 826 827 dict={} 828 # A general process prefix which appears in front of all MadLooop 829 # subroutines and common block so that several processes can be compiled 830 # together into one library, as necessary to follow BLHA guidelines. 831 832 dict['proc_prefix'] = self.get_ME_identifier(matrix_element, 833 group_number = group_number, group_elem_number = proc_id) 834 835 # The proc_id is used for MadEvent grouping, so none of our concern here 836 # and it is simply set to an empty string. 837 dict['proc_id'] = '' 838 # Extract version number and date from VERSION file 839 info_lines = self.get_mg5_info_lines() 840 dict['info_lines'] = info_lines 841 # Extract process info lines 842 process_lines = self.get_process_info_lines(matrix_element) 843 dict['process_lines'] = process_lines 844 # Extract number of external particles 845 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 846 dict['nexternal'] = nexternal 847 dict['nincoming'] = ninitial 848 # Extract ncomb 849 ncomb = matrix_element.get_helicity_combinations() 850 dict['ncomb'] = ncomb 851 # Extract nloopamps 852 nloopamps = matrix_element.get_number_of_loop_amplitudes() 853 dict['nloopamps'] = nloopamps 854 # Extract nloopdiags 855 nloopdiags = len(matrix_element.get('diagrams')) 856 dict['nloopdiags'] = nloopdiags 857 # Extract nctamps 858 nctamps = matrix_element.get_number_of_CT_amplitudes() 859 dict['nctamps'] = nctamps 860 # Extract nwavefuncs 861 nwavefuncs = matrix_element.get_number_of_external_wavefunctions() 862 dict['nwavefuncs'] = nwavefuncs 863 # Set format of the double precision 864 dict['real_dp_format']='real*8' 865 dict['real_mp_format']='real*16' 866 # Set format of the complex 867 dict['complex_dp_format']='complex*16' 868 dict['complex_mp_format']='complex*32' 869 # Set format of the masses 870 dict['mass_dp_format'] = dict['complex_dp_format'] 871 dict['mass_mp_format'] = dict['complex_mp_format'] 872 # Fill in default values for the placeholders for the madevent 873 # loop-induced output 874 dict['nmultichannels'] = 0 875 dict['nmultichannel_configs'] = 0 876 dict['config_map_definition'] = '' 877 dict['config_index_map_definition'] = '' 878 # Color matrix size 879 # For loop induced processes it is NLOOPAMPSxNLOOPAMPS and otherwise 880 # it is NLOOPAMPSxNBORNAMPS 881 # Also, how to access the number of Born squared order contributions 882 883 if matrix_element.get('processes')[0].get('has_born'): 884 dict['color_matrix_size'] = 'nbornamps' 885 dict['get_nsqso_born']=\ 886 "include 'nsqso_born.inc'" 887 else: 888 dict['get_nsqso_born']="""INTEGER NSQSO_BORN 889 PARAMETER (NSQSO_BORN=0) 890 """ 891 dict['color_matrix_size'] = 'nloopamps' 892 893 # These placeholders help to have as many common templates for the 894 # output of the loop induced processes and those with a born 895 # contribution. 
896 if matrix_element.get('processes')[0].get('has_born'): 897 # Extract nbornamps 898 nbornamps = matrix_element.get_number_of_born_amplitudes() 899 dict['nbornamps'] = nbornamps 900 dict['ncomb_helas_objs'] = ',ncomb' 901 dict['nbornamps_decl'] = \ 902 """INTEGER NBORNAMPS 903 PARAMETER (NBORNAMPS=%d)"""%nbornamps 904 dict['nBornAmps'] = nbornamps 905 906 else: 907 dict['ncomb_helas_objs'] = '' 908 dict['dp_born_amps_decl'] = '' 909 dict['dp_born_amps_decl_in_mp'] = '' 910 dict['copy_mp_to_dp_born_amps'] = '' 911 dict['mp_born_amps_decl'] = '' 912 dict['nbornamps_decl'] = '' 913 dict['nbornamps'] = 0 914 dict['nBornAmps'] = 0 915 916 return dict
917
918 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 919 group_number = None, proc_id = None, config_map = None):
920 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and 921 mp_born_amps_and_wfs. 922 The arguments group_number and proc_id are just for the LoopInduced 923 output with MadEvent and only used in get_ME_identifier. 924 """ 925 926 # Create the necessary files for the loop matrix element subroutine 927 928 if config_map: 929 raise MadGraph5Error, 'The default loop output cannot be used with'+\ 930 'MadEvent and cannot compute the AMP2 for multi-channeling.' 931 932 if not isinstance(fortran_model,\ 933 helas_call_writers.FortranUFOHelasCallWriter): 934 raise MadGraph5Error, 'The loop fortran output can only'+\ 935 ' work with a UFO Fortran model' 936 937 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter( 938 argument=fortran_model.get('model'), 939 hel_sum=matrix_element.get('processes')[0].get('has_born')) 940 941 # Compute the analytical information of the loop wavefunctions in the 942 # loop helas matrix elements using the cached aloha model to reuse 943 # as much as possible the aloha computations already performed for 944 # writing out the aloha fortran subroutines. 945 matrix_element.compute_all_analytic_information( 946 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 947 948 # Initialize a general replacement dictionary with entries common to 949 # many files generated here. 950 matrix_element.rep_dict = self.generate_general_replace_dict( 951 matrix_element, group_number = group_number, proc_id = proc_id) 952 953 # Extract max number of loop couplings (specific to this output type) 954 matrix_element.rep_dict['maxlcouplings']= \ 955 matrix_element.find_max_loop_coupling() 956 # The born amp declaration suited for also outputing the loop-induced 957 # processes as well. 958 if matrix_element.get('processes')[0].get('has_born'): 959 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \ 960 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\ 961 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix'] 962 matrix_element.rep_dict['dp_born_amps_decl'] = \ 963 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 964 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 965 matrix_element.rep_dict['mp_born_amps_decl'] = \ 966 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\ 967 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix'] 968 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \ 969 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO']) 970 971 if writer: 972 raise MadGraph5Error, 'Matrix output mode no longer supported.' 
973 974 filename = 'loop_matrix.f' 975 calls = self.write_loopmatrix(writers.FortranWriter(filename), 976 matrix_element, 977 LoopFortranModel) 978 979 # Write out the proc_prefix in a file, this is quite handy 980 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 981 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 982 proc_prefix_writer.close() 983 984 filename = 'check_sa.f' 985 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 986 987 filename = 'CT_interface.f' 988 self.write_CT_interface(writers.FortranWriter(filename),\ 989 matrix_element) 990 991 992 993 filename = 'improve_ps.f' 994 calls = self.write_improve_ps(writers.FortranWriter(filename), 995 matrix_element) 996 997 filename = 'loop_num.f' 998 self.write_loop_num(writers.FortranWriter(filename),\ 999 matrix_element,LoopFortranModel) 1000 1001 filename = 'mp_born_amps_and_wfs.f' 1002 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\ 1003 matrix_element,LoopFortranModel) 1004 1005 # Extract number of external particles 1006 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1007 filename = 'nexternal.inc' 1008 self.write_nexternal_file(writers.FortranWriter(filename), 1009 nexternal, ninitial) 1010 1011 filename = 'process_info.inc' 1012 self.write_process_info_file(writers.FortranWriter(filename), 1013 matrix_element) 1014 return calls
1015
1016 - def write_process_info_file(self, writer, matrix_element):
1017 """A small structural function to write the include file specifying some 1018 process characteristics.""" 1019 1020 model = matrix_element.get('processes')[0].get('model') 1021 process_info = {} 1022 # The maximum spin of any particle connected (or directly running in) 1023 # any loop of this matrix element. This is important because there is 1024 # some limitation in the stability tests that can be performed when this 1025 # maximum spin is above 3 (vectors). Also CutTools has limitations in 1026 # this regard. 1027 process_info['max_spin_connected_to_loop']=\ 1028 matrix_element.get_max_spin_connected_to_loop() 1029 1030 process_info['max_spin_external_particle']= max( 1031 model.get_particle(l.get('id')).get('spin') for l in 1032 matrix_element.get('processes')[0].get('legs')) 1033 1034 proc_include = \ 1035 """ 1036 INTEGER MAX_SPIN_CONNECTED_TO_LOOP 1037 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d) 1038 INTEGER MAX_SPIN_EXTERNAL_PARTICLE 1039 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d) 1040 """%process_info 1041 1042 writer.writelines(proc_include)
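Illustrative rendering of the include written above, with made-up spin values (3 corresponds to a vector particle in MG5's 2S+1 convention):

process_info = {'max_spin_connected_to_loop': 3,
                'max_spin_external_particle': 3}
rendered = """
      INTEGER MAX_SPIN_CONNECTED_TO_LOOP
      PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d)
      INTEGER MAX_SPIN_EXTERNAL_PARTICLE
      PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d)
""" % process_info
assert 'PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=3)' in rendered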
1043
1044 - def generate_subprocess_directory(self, matrix_element, fortran_model):
1045          """ To overload the default name for this function such that the correct 
1046          function is used when called from the command interface.""" 
1047   
1048          self.unique_id += 1 
1049          return self.generate_loop_subprocess(matrix_element, fortran_model, 
1050                                               unique_id=self.unique_id)
1051
1052 - def write_check_sa(self, writer, matrix_element):
1053 """Writes out the steering code check_sa. In the optimized output mode, 1054 All the necessary entries in the replace_dictionary have already been 1055 set in write_loopmatrix because it is only there that one has access to 1056 the information about split orders.""" 1057 replace_dict = copy.copy(matrix_element.rep_dict) 1058 for key in ['print_so_born_results','print_so_loop_results', 1059 'write_so_born_results','write_so_loop_results','set_coupling_target']: 1060 if key not in replace_dict.keys(): 1061 replace_dict[key]='' 1062 1063 if matrix_element.get('processes')[0].get('has_born'): 1064 file = open(os.path.join(self.template_dir,'check_sa.inc')).read() 1065 else: 1066 file = open(os.path.join(self.template_dir,\ 1067 'check_sa_loop_induced.inc')).read() 1068 file=file%replace_dict 1069 writer.writelines(file) 1070 1071 # We can always write the f2py wrapper if present (in loop optimized mode, it is) 1072 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')): 1073 return 1074 file = open(os.path.join(self.template_dir,\ 1075 'check_py.f.inc')).read() 1076 file=file%replace_dict 1077 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f') 1078 new_writer = writer.__class__(new_path, 'w') 1079 new_writer.writelines(file) 1080 1081 file = open(os.path.join(self.template_dir,\ 1082 'check_sa.py.inc')).read() 1083 # For now just put in an empty PS point but in the future, maybe generate 1084 # a valid one already here by default 1085 curr_proc = matrix_element.get('processes')[0] 1086 random_PSpoint_python_formatted = \ 1087 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input. 1088 p= [[None,]*4]*%d"""%len(curr_proc.get('legs')) 1089 1090 process_definition_string = curr_proc.nice_string().replace('Process:','') 1091 file=file.format(random_PSpoint_python_formatted,process_definition_string) 1092 new_path = writer.name.replace('check_sa.f', 'check_sa.py') 1093 new_writer = open(new_path, 'w') 1094 new_writer.writelines(file) 1095 # Make it executable 1096 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1097
1098 - def write_improve_ps(self, writer, matrix_element):
1099 """ Write out the improve_ps subroutines which modify the PS point 1100 given in input and slightly deform it to achieve exact onshellness on 1101 all external particles as well as perfect energy-momentum conservation""" 1102 replace_dict = copy.copy(matrix_element.rep_dict) 1103 1104 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial() 1105 replace_dict['ninitial']=ninitial 1106 mass_list=matrix_element.get_external_masses()[:-2] 1107 mp_variable_prefix = check_param_card.ParamCard.mp_prefix 1108 1109 # Write the quadruple precision version of this routine only. 1110 replace_dict['real_format']=replace_dict['real_mp_format'] 1111 replace_dict['mp_prefix']='MP_' 1112 replace_dict['exp_letter']='e' 1113 replace_dict['mp_specifier']='_16' 1114 replace_dict['coupl_inc_name']='mp_coupl.inc' 1115 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\ 1116 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \ 1117 i, m in enumerate(mass_list)]) 1118 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read() 1119 file_mp=file_mp%replace_dict 1120 # 1121 writer.writelines(file_mp)
1122
1123 - def write_loop_num(self, writer, matrix_element,fortran_model):
1124 """ Create the file containing the core subroutine called by CutTools 1125 which contains the Helas calls building the loop""" 1126 1127 if not matrix_element.get('processes') or \ 1128 not matrix_element.get('diagrams'): 1129 return 0 1130 1131 # Set lowercase/uppercase Fortran code 1132 writers.FortranWriter.downcase = False 1133 1134 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 1135 1136 replace_dict = copy.copy(matrix_element.rep_dict) 1137 1138 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element) 1139 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling() 1140 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls) 1141 1142 # The squaring is only necessary for the processes with born where the 1143 # sum over helicities is done before sending the numerator to CT. 1144 dp_squaring_lines=['DO I=1,NBORNAMPS', 1145 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)', 1146 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1147 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO'] 1148 mp_squaring_lines=['DO I=1,NBORNAMPS', 1149 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)', 1150 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1', 1151 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO'] 1152 if matrix_element.get('processes')[0].get('has_born'): 1153 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines) 1154 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines) 1155 else: 1156 replace_dict['dp_squaring']='RES=BUFF' 1157 replace_dict['mp_squaring']='QPRES=BUFF' 1158 1159 # Prepend MP_ to all helas calls. 1160 self.turn_to_mp_calls(loop_helas_calls) 1161 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls) 1162 1163 file=file%replace_dict 1164 1165 if writer: 1166 writer.writelines(file) 1167 else: 1168 return file
1169
1170 - def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1171 """ Create the file CT_interface.f which contains the subroutine defining 1172 the loop HELAS-like calls along with the general interfacing subroutine. 1173 It is used to interface against any OPP tool, including Samurai and Ninja.""" 1174 1175 files=[] 1176 1177 # First write CT_interface which interfaces MG5 with CutTools. 1178 replace_dict=copy.copy(matrix_element.rep_dict) 1179 1180 # We finalize CT result differently wether we used the built-in 1181 # squaring against the born. 1182 if matrix_element.get('processes')[0].get('has_born'): 1183 replace_dict['finalize_CT']='\n'.join([\ 1184 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)]) 1185 else: 1186 replace_dict['finalize_CT']='\n'.join([\ 1187 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)]) 1188 1189 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read() 1190 1191 file = file % replace_dict 1192 files.append(file) 1193 1194 # Now collect the different kind of subroutines needed for the 1195 # loop HELAS-like calls. 1196 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps() 1197 1198 for callkey in HelasLoopAmpsCallKeys: 1199 replace_dict=copy.copy(matrix_element.rep_dict) 1200 # Add to this dictionary all other attribute common to all 1201 # HELAS-like loop subroutines. 1202 if matrix_element.get('processes')[0].get('has_born'): 1203 replace_dict['validh_or_nothing']=',validh' 1204 else: 1205 replace_dict['validh_or_nothing']='' 1206 # In the optimized output, the number of couplings in the loop is 1207 # not specified so we only treat it here if necessary: 1208 if len(callkey)>2: 1209 replace_dict['ncplsargs']=callkey[2] 1210 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)]) 1211 replace_dict['cplsargs']=cplsargs 1212 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1213 replace_dict['cplsdecl']=cplsdecl 1214 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2] 1215 replace_dict['mp_cplsdecl']=mp_cplsdecl 1216 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\ 1217 "MP_LC(%d)=MP_C%d"%(i,i)])\ 1218 for i in range(1,callkey[2]+1)]) 1219 replace_dict['cplset']=cplset 1220 1221 replace_dict['nloopline']=callkey[0] 1222 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)]) 1223 replace_dict['wfsargs']=wfsargs 1224 # We don't pass the multiple precision mass in the optimized_output 1225 if not optimized_output: 1226 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)]) 1227 else: 1228 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)]) 1229 replace_dict['margs']=margs 1230 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2] 1231 replace_dict['wfsargsdecl']=wfsargsdecl 1232 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1233 replace_dict['margsdecl']=margsdecl 1234 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2] 1235 replace_dict['mp_margsdecl']=mp_margsdecl 1236 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \ 1237 i in range(1,callkey[1]+1)]) 1238 replace_dict['weset']=weset 1239 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)]) 1240 replace_dict['weset']=weset 1241 msetlines=["M2L(1)=M%d**2"%(callkey[0]),] 1242 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \ 1243 i in range(2,callkey[0]+1)]) 1244 replace_dict['mset']=mset 1245 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]), 1246 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])] 1247 
mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2), 1248 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \ 1249 i in range(3,callkey[0]+3)]) 1250 replace_dict['mset2']=mset2 1251 replace_dict['nwfsargs'] = callkey[1] 1252 if callkey[0]==callkey[1]: 1253 replace_dict['nwfsargs_header'] = "" 1254 replace_dict['pairingargs']="" 1255 replace_dict['pairingdecl']="" 1256 pairingset="""DO I=1,NLOOPLINE 1257 PAIRING(I)=1 1258 ENDDO 1259 """ 1260 replace_dict['pairingset']=pairingset 1261 else: 1262 replace_dict['nwfsargs_header'] = '_%d'%callkey[1] 1263 pairingargs="".join([("P"+str(i)+", ") for i in \ 1264 range(1,callkey[0]+1)]) 1265 replace_dict['pairingargs']=pairingargs 1266 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \ 1267 range(1,callkey[0]+1)])[:-2] 1268 replace_dict['pairingdecl']=pairingdecl 1269 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \ 1270 i in range(1,callkey[0]+1)]) 1271 replace_dict['pairingset']=pairingset 1272 1273 file = open(os.path.join(self.template_dir,\ 1274 'helas_loop_amplitude.inc')).read() 1275 file = file % replace_dict 1276 files.append(file) 1277 1278 file="\n".join(files) 1279 1280 if writer: 1281 writer.writelines(file,context=self.get_context(matrix_element)) 1282 else: 1283 return file
1284 1285 # Helper function to split HELAS CALLS in dedicated subroutines placed 1286 # in different files.
1287 - def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \ 1288 helas_calls, entry_name, bunch_name,n_helas=2000, 1289 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 1290 continue_label = 1000, momenta_array_name='P', 1291 context={}):
1292          """ Finish the code generation with splitting. 
1293          Split the helas calls in the argument helas_calls into bunches of 
1294          size n_helas and place them in dedicated subroutines with name 
1295          <bunch_name>_i. Also set up the corresponding calls to these subroutines 
1296          in the replace_dict dictionary under the entry entry_name. 
1297          The context specified will be forwarded to the fileWriter.""" 
1298          helascalls_replace_dict=copy.copy(replace_dict) 
1299          helascalls_replace_dict['bunch_name']=bunch_name 
1300          helascalls_files=[] 
1301          for i, k in enumerate(range(0, len(helas_calls), n_helas)): 
1302              helascalls_replace_dict['bunch_number']=i+1 
1303              helascalls_replace_dict['helas_calls']=\ 
1304                                         '\n'.join(helas_calls[k:k + n_helas]) 
1305              helascalls_replace_dict['required_so_broadcaster']=\ 
1306                                         required_so_broadcaster 
1307              helascalls_replace_dict['continue_label']=continue_label 
1308              new_helascalls_file = open(os.path.join(self.template_dir,\ 
1309                                         template_name)).read() 
1310              new_helascalls_file = new_helascalls_file % helascalls_replace_dict 
1311              helascalls_files.append(new_helascalls_file) 
1312          # Setup the call to these HELASCALLS subroutines in loop_matrix.f 
1313          helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\ 
1314                  (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \ 
1315                  for a in range(len(helascalls_files))] 
1316          replace_dict[entry_name]='\n'.join(helascalls_calls) 
1317          if writer: 
1318              for i, helascalls_file in enumerate(helascalls_files): 
1319                  filename = '%s_%d.f'%(bunch_name,i+1) 
1320                  writers.FortranWriter(filename).writelines(helascalls_file, 
1321                                                             context=context) 
1322          else: 
1323              masterfile='\n'.join([masterfile,]+helascalls_files) 
1324   
1325          return masterfile
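A quick check of the bunching arithmetic above with made-up inputs (the proc prefix 'ML5_0_' and bunch name 'helas_calls_ampb' are only examples): 4500 calls with the default n_helas=2000 produce three bunch files and three corresponding CALL lines.

helas_calls = ['CALL DUMMY(%d)' % i for i in range(4500)]
n_helas = 2000
bunches = [helas_calls[k:k + n_helas]
           for k in range(0, len(helas_calls), n_helas)]
calls = ["CALL %s%s_%d(P,NHEL,H,IC)" % ('ML5_0_', 'helas_calls_ampb', a + 1)
         for a in range(len(bunches))]
assert len(bunches) == 3
assert calls[-1] == 'CALL ML5_0_helas_calls_ampb_3(P,NHEL,H,IC)'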
1326
1327 - def write_loopmatrix(self, writer, matrix_element, fortran_model, 1328 noSplit=False):
1329 """Create the loop_matrix.f file.""" 1330 1331 if not matrix_element.get('processes') or \ 1332 not matrix_element.get('diagrams'): 1333 return 0 1334 1335 # Set lowercase/uppercase Fortran code 1336 1337 writers.FortranWriter.downcase = False 1338 1339 replace_dict = copy.copy(matrix_element.rep_dict) 1340 1341 # Extract overall denominator 1342 # Averaging initial state color, spin, and identical FS particles 1343 den_factor_line = self.get_den_factor_line(matrix_element) 1344 replace_dict['den_factor_line'] = den_factor_line 1345 # When the user asks for the polarized matrix element we must 1346 # multiply back by the helicity averaging factor 1347 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 1348 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 1349 matrix_element.get_beams_hel_avg_factor() 1350 1351 # These entries are specific for the output for loop-induced processes 1352 # Also sets here the details of the squaring of the loop ampltiudes 1353 # with the born or the loop ones. 1354 if not matrix_element.get('processes')[0].get('has_born'): 1355 replace_dict['compute_born']=\ 1356 """C There is of course no born for loop induced processes 1357 ANS(0)=0.0d0 1358 """ 1359 replace_dict['set_reference']='\n'.join([ 1360 'C For loop-induced, the reference for comparison is set later'+\ 1361 ' from the total contribution of the previous PS point considered.', 1362 'C But you can edit here the value to be used for the first PS point.', 1363 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else', 1364 'ref=nextRef/DBLE(NPSPOINTS)','endif']) 1365 replace_dict['loop_induced_setup'] = '\n'.join([ 1366 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.', 1367 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF']) 1368 replace_dict['loop_induced_finalize'] = \ 1369 ("""DO I=NCTAMPS+1,NLOOPAMPS 1370 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN 1371 WRITE(*,*) '##W03 WARNING Contribution ',I 1372 WRITE(*,*) ' is unstable for helicity ',H 1373 ENDIF 1374 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN 1375 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.' 1376 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I) 1377 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I) 1378 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I) 1379 C ENDIF 1380 ENDDO 1381 1227 CONTINUE 1382 HELPICKED=HELPICKED_BU""")%replace_dict 1383 replace_dict['loop_helas_calls']="" 1384 replace_dict['nctamps_or_nloopamps']='nloopamps' 1385 replace_dict['nbornamps_or_nloopamps']='nloopamps' 1386 replace_dict['squaring']=\ 1387 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J))) 1388 IF (J.EQ.1) THEN 1389 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I)) 1390 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I)) 1391 ENDIF""" 1392 else: 1393 replace_dict['compute_born']=\ 1394 """C Compute the born, for a specific helicity if asked so. 
1395 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0)) 1396 """%matrix_element.rep_dict 1397 replace_dict['set_reference']=\ 1398 """C We chose to use the born evaluation for the reference 1399 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict 1400 replace_dict['loop_induced_helas_calls'] = "" 1401 replace_dict['loop_induced_finalize'] = "" 1402 replace_dict['loop_induced_setup'] = "" 1403 replace_dict['nctamps_or_nloopamps']='nctamps' 1404 replace_dict['nbornamps_or_nloopamps']='nbornamps' 1405 replace_dict['squaring']='\n'.join(['DO K=1,3', 1406 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))', 1407 'ENDDO']) 1408 1409 # Write a dummy nsquaredSO.inc which is used in the default 1410 # loop_matrix.f code (even though it does not support split orders evals) 1411 # just to comply with the syntax expected from the external code using MadLoop. 1412 writers.FortranWriter('nsquaredSO.inc').writelines( 1413 """INTEGER NSQUAREDSO 1414 PARAMETER (NSQUAREDSO=0)""") 1415 1416 # Actualize results from the loops computed. Only necessary for 1417 # processes with a born. 1418 actualize_ans=[] 1419 if matrix_element.get('processes')[0].get('has_born'): 1420 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS") 1421 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \ 1422 in range(1,4)) 1423 actualize_ans.append(\ 1424 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN") 1425 actualize_ans.append(\ 1426 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'") 1427 actualize_ans.extend(["ENDIF","ENDDO"]) 1428 replace_dict['actualize_ans']='\n'.join(actualize_ans) 1429 else: 1430 replace_dict['actualize_ans']=\ 1431 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check. 1432 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN 1433 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.' 
1434 C WRITE(*,*) 'Finite contribution = ',ANS(1) 1435 C WRITE(*,*) 'single pole contribution = ',ANS(2) 1436 C WRITE(*,*) 'double pole contribution = ',ANS(3) 1437 C ENDIF""")%replace_dict 1438 1439 # Write out the color matrix 1440 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 1441 CMWriter=open(pjoin('..','MadLoop5_resources', 1442 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 1443 for ColorLine in CMNum: 1444 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1445 CMWriter.close() 1446 CMWriter=open(pjoin('..','MadLoop5_resources', 1447 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 1448 for ColorLine in CMDenom: 1449 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 1450 CMWriter.close() 1451 1452 # Write out the helicity configurations 1453 HelConfigs=matrix_element.get_helicity_matrix() 1454 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 1455 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 1456 for HelConfig in HelConfigs: 1457 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 1458 HelConfigWriter.close() 1459 1460 # Extract helas calls 1461 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\ 1462 matrix_element) 1463 # The proc_prefix must be replaced 1464 loop_amp_helas_calls = [lc % matrix_element.rep_dict 1465 for lc in loop_amp_helas_calls] 1466 1467 born_ct_helas_calls, UVCT_helas_calls = \ 1468 fortran_model.get_born_ct_helas_calls(matrix_element) 1469 # In the default output, we do not need to separate these two kind of 1470 # contributions 1471 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls 1472 file = open(os.path.join(self.template_dir,\ 1473 1474 'loop_matrix_standalone.inc')).read() 1475 1476 if matrix_element.get('processes')[0].get('has_born'): 1477 toBeRepaced='loop_helas_calls' 1478 else: 1479 toBeRepaced='loop_induced_helas_calls' 1480 1481 # Decide here wether we need to split the loop_matrix.f file or not. 1482 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)): 1483 file=self.split_HELASCALLS(writer,replace_dict,\ 1484 'helas_calls_split.inc',file,born_ct_helas_calls,\ 1485 'born_ct_helas_calls','helas_calls_ampb') 1486 file=self.split_HELASCALLS(writer,replace_dict,\ 1487 'helas_calls_split.inc',file,loop_amp_helas_calls,\ 1488 toBeRepaced,'helas_calls_ampl') 1489 else: 1490 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls) 1491 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls) 1492 1493 file = file % replace_dict 1494 1495 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*') 1496 n_loop_calls = len(filter(lambda call: 1497 not loop_calls_finder.match(call) is None, loop_amp_helas_calls)) 1498 if writer: 1499 # Write the file 1500 writer.writelines(file) 1501 return n_loop_calls 1502 else: 1503 # Return it to be written along with the others 1504 return n_loop_calls, file
1505
1506 - def write_bornmatrix(self, writer, matrix_element, fortran_model):
1507 """Create the born_matrix.f file for the born process as for a standard 1508 tree-level computation.""" 1509 1510 if not matrix_element.get('processes') or \ 1511 not matrix_element.get('diagrams'): 1512 return 0 1513 1514 if not isinstance(writer, writers.FortranWriter): 1515 raise writers.FortranWriter.FortranWriterError(\ 1516 "writer not FortranWriter") 1517 1518 # For now, we can use the exact same treatment as for tree-level 1519 # computations by redefining here a regular HelasMatrixElementf or the 1520 # born process. 1521 # It is important to make a deepcopy, as we don't want any possible 1522 # treatment on the objects of the bornME to have border effects on 1523 # the content of the LoopHelasMatrixElement object. 1524 bornME = helas_objects.HelasMatrixElement() 1525 for prop in bornME.keys(): 1526 bornME.set(prop,copy.deepcopy(matrix_element.get(prop))) 1527 bornME.set('base_amplitude',None,force=True) 1528 bornME.set('diagrams',copy.deepcopy(\ 1529 matrix_element.get_born_diagrams())) 1530 bornME.set('color_basis',copy.deepcopy(\ 1531 matrix_element.get('born_color_basis'))) 1532 bornME.set('color_matrix',copy.deepcopy(\ 1533 color_amp.ColorMatrix(bornME.get('color_basis')))) 1534 # This is to decide wether once to reuse old wavefunction to store new 1535 # ones (provided they are not used further in the code.) 1536 bornME.optimization = True 1537 return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4( 1538 writer, bornME, fortran_model, 1539 proc_prefix=matrix_element.rep_dict['proc_prefix'])
1540
1541 - def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model, 1542 noSplit=False):
1543 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which 1544 computes just the external wavefunction and born amplitudes in 1545 multiple precision. """ 1546 1547 if not matrix_element.get('processes') or \ 1548 not matrix_element.get('diagrams'): 1549 return 0 1550 1551 replace_dict = copy.copy(matrix_element.rep_dict) 1552 1553 # For the wavefunction copy, check what suffix is needed for the W array 1554 if matrix_element.get('processes')[0].get('has_born'): 1555 replace_dict['h_w_suffix']=',H' 1556 else: 1557 replace_dict['h_w_suffix']='' 1558 1559 # Extract helas calls 1560 born_amps_and_wfs_calls , uvct_amp_calls = \ 1561 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True) 1562 # In the default output, these two kind of contributions do not need to 1563 # be differentiated 1564 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls 1565 1566 # Turn these HELAS calls to the multiple-precision version of the HELAS 1567 # subroutines. 1568 self.turn_to_mp_calls(born_amps_and_wfs_calls) 1569 1570 file = open(os.path.join(self.template_dir,\ 1571 'mp_born_amps_and_wfs.inc')).read() 1572 # Decide here wether we need to split the loop_matrix.f file or not. 1573 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)): 1574 file=self.split_HELASCALLS(writer,replace_dict,\ 1575 'mp_helas_calls_split.inc',file,\ 1576 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\ 1577 'mp_helas_calls') 1578 else: 1579 replace_dict['born_amps_and_wfs_calls']=\ 1580 '\n'.join(born_amps_and_wfs_calls) 1581 1582 file = file % replace_dict 1583 if writer: 1584 # Write the file 1585 writer.writelines(file) 1586 else: 1587 # Return it to be written along with the others 1588 return file 1589 1590 #=============================================================================== 1591 # LoopProcessOptimizedExporterFortranSA 1592 #=============================================================================== 1593
1594 -class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1595 """Class to take care of exporting a set of loop matrix elements in the 1596 Fortran format which exploits the Pozzorini method of representing 1597 the loop numerators as polynomial to render its evaluations faster.""" 1598 1599 template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized') 1600 # The option below controls wether one wants to group together in one single 1601 # CutTools/TIR call the loops with same denominator structure 1602 forbid_loop_grouping = False 1603 1604 # List of potential TIR library one wants to link to. 1605 # Golem and Samurai will typically get obtained from gosam_contrib 1606 # which might also contain a version of ninja. We must therefore 1607 # make sure that ninja appears first in the list of -L because 1608 # it is the tool for which the user is most susceptible of 1609 # using a standalone verison independent of gosam_contrib 1610 all_tir=['pjfry','iregi','ninja','golem','samurai','collier'] 1611
1612 - def __init__(self, dir_path = "", opt=None):
1613 """Initiate the LoopProcessOptimizedExporterFortranSA with directory 1614 information on where to find all the loop-related source files, 1615 like CutTools and TIR""" 1616 1617 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt) 1618 1619 # TIR available ones 1620 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True, 1621 'samurai':True,'ninja':True,'collier':True} 1622 1623 for tir in self.all_tir: 1624 tir_dir="%s_dir"%tir 1625 if tir_dir in self.opt and not self.opt[tir_dir] is None: 1626 # Make sure to defer the 'local path' to the current MG5aMC root. 1627 tir_path = self.opt[tir_dir].strip() 1628 if tir_path.startswith('.'): 1629 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path)) 1630 setattr(self,tir_dir,tir_path) 1631 else: 1632 setattr(self,tir_dir,'')
1633
1634 - def copy_template(self, model):
1635 """Additional actions needed to setup the Template. 1636 """ 1637 1638 super(LoopProcessOptimizedExporterFortranSA, self).copy_template(model) 1639 1640 self.loop_optimized_additional_template_setup()
1641
1642 - def get_context(self,matrix_element, **opts):
1643 """ Additional contextual information which needs to be created for 1644 the optimized output.""" 1645 1646 context = LoopProcessExporterFortranSA.get_context(self, matrix_element, 1647 **opts) 1648 1649 # For now assume Ninja always supports quadruple precision 1650 try: 1651 context['ninja_supports_quad_prec'] = \ 1652 misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir')) 1653 except AttributeError: 1654 context['ninja_supports_quad_prec'] = False 1655 1656 for tir in self.all_tir: 1657 context['%s_available'%tir]=self.tir_available_dict[tir] 1658 # safety check 1659 if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']: 1660 raise MadGraph5Error,"%s was not a TIR currently interfaced."%tir_name 1661 1662 return context
1663
1665 """ Perform additional actions specific for this class when setting 1666 up the template with the copy_template function.""" 1667 1668 # We must link the TIR to the Library folder of the active Template 1669 link_tir_libs=[] 1670 tir_libs=[] 1671 tir_include=[] 1672 1673 for tir in self.all_tir: 1674 tir_dir="%s_dir"%tir 1675 libpath=getattr(self,tir_dir) 1676 libname="lib%s.a"%tir 1677 tir_name=tir 1678 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 1679 libpath,libname,tir_name=tir_name) 1680 if libpath != "": 1681 if tir in ['ninja','pjfry','golem','samurai','collier']: 1682 # It is cleaner to use the original location of the libraries 1683 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 1684 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 1685 # For Ninja, we must also link against OneLoop. 1686 if tir in ['ninja']: 1687 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 1688 for ext in ['a','dylib','so']): 1689 raise MadGraph5Error( 1690 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. Please place a symlink to it there."%libpath) 1691 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 1692 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 1693 if tir in ['ninja','golem', 'samurai','collier']: 1694 trgt_path = pjoin(os.path.dirname(libpath),'include') 1695 if os.path.isdir(trgt_path): 1696 to_include = misc.find_includes_path(trgt_path, 1697 self.include_names[tir]) 1698 else: 1699 to_include = None 1700 # Special possible location for collier 1701 if to_include is None and tir=='collier': 1702 to_include = misc.find_includes_path( 1703 pjoin(libpath,'modules'),self.include_names[tir]) 1704 if to_include is None: 1705 logger.error( 1706 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+ 1707 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 1708 to_include = '<Not_found_define_it_yourself>' 1709 tir_include.append('-I %s'%str(to_include)) 1710 # To be able to easily compile a MadLoop library using 1711 # makefiles built outside of the MG5_aMC framework 1712 # (such as what is done with the Sherpa interface), we 1713 # place here an easy handle on the golem includes 1714 name_map = {'golem':'golem95','samurai':'samurai', 1715 'ninja':'ninja','collier':'collier'} 1716 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'), 1717 name='%s_include'%name_map[tir],abspath=True) 1718 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'), 1719 name='%s_lib'%name_map[tir],abspath=True) 1720 else : 1721 link_tir_libs.append('-l%s'%tir) 1722 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 1723 1724 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses', 1725 'MadLoop_makefile_definitions') 1726 if os.path.isfile(MadLoop_makefile_definitions): 1727 os.remove(MadLoop_makefile_definitions) 1728 1729 calls = self.write_loop_makefile_definitions( 1730 writers.MakefileWriter(MadLoop_makefile_definitions), 1731 link_tir_libs,tir_libs, tir_include=tir_include) 1732 1733 # Finally overwrite MadLoopCommons.f now that we know the availibility of 1734 # COLLIER. 
1735 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 1736 "SubProcesses","MadLoopCommons.inc")).read() 1737 writer = writers.FortranWriter(os.path.join(self.dir_path, 1738 "SubProcesses","MadLoopCommons.f")) 1739 writer.writelines(MadLoopCommon%{ 1740 'print_banner_commands':self.MadLoop_banner}, context={ 1741 'collier_available':self.tir_available_dict['collier']}) 1742 writer.close()
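# Illustrative outcome of the TIR linking logic above, with a purely hypothetical
# Ninja installation under /opt/gosam (paths and values are assumptions, not defaults):
libpath_example = '/opt/gosam/lib'
link_example = '-L%s/ -l%s' % (libpath_example, 'ninja')          # goes into link_tir_libs
lib_example = '%s/lib%s.$(libext)' % (libpath_example, 'ninja')   # goes into tir_libs
# link_example == '-L/opt/gosam/lib/ -lninja'
# lib_example  == '/opt/gosam/lib/libninja.$(libext)'
# For Ninja a matching '-lavh_olo' pair is appended as well (OneLOop dependency),
# while a TIR compiled internally instead falls back to '-l<tir>' and
# '$(LIBDIR)lib<tir>.$(libext)'.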
1743 1755 1756 1884
1885 - def set_group_loops(self, matrix_element):
1886 """ Decides whether we must group loops or not for this matrix element""" 1887 1888 # Decide if loops sharing same denominator structures have to be grouped 1889 # together or not. 1890 if self.forbid_loop_grouping: 1891 self.group_loops = False 1892 else: 1893 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\ 1894 and matrix_element.get('processes')[0].get('has_born') 1895 1896 return self.group_loops
1897
1898 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
1899 """create the global information for loops""" 1900 1901 super(LoopProcessOptimizedExporterFortranSA,self).finalize(matrix_element, 1902 cmdhistory, MG5options, outputflag) 1903 self.write_global_specs(matrix_element)
1904 1905
1906 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model, 1907 group_number = None, proc_id = None, config_map = None):
1908 """ Writes loop_matrix.f, CT_interface.f,TIR_interface.f,GOLEM_inteface.f 1909 and loop_num.f only but with the optimized FortranModel. 1910 The arguments group_number and proc_id are just for the LoopInduced 1911 output with MadEvent and only used in get_ME_identifier.""" 1912 1913 # Warn the user that the 'matrix' output where all relevant code is 1914 # put together in a single file is not supported in this loop output. 1915 if writer: 1916 raise MadGraph5Error, 'Matrix output mode no longer supported.' 1917 1918 if not isinstance(fortran_model,\ 1919 helas_call_writers.FortranUFOHelasCallWriter): 1920 raise MadGraph5Error, 'The optimized loop fortran output can only'+\ 1921 ' work with a UFO Fortran model' 1922 OptimizedFortranModel=\ 1923 helas_call_writers.FortranUFOHelasCallWriterOptimized(\ 1924 fortran_model.get('model'),False) 1925 1926 1927 if not matrix_element.get('processes')[0].get('has_born') and \ 1928 not self.compute_color_flows: 1929 logger.debug("Color flows will be employed despite the option"+\ 1930 " 'loop_color_flows' being set to False because it is necessary"+\ 1931 " for optimizations.") 1932 1933 # Compute the analytical information of the loop wavefunctions in the 1934 # loop helas matrix elements using the cached aloha model to reuse 1935 # as much as possible the aloha computations already performed for 1936 # writing out the aloha fortran subroutines. 1937 matrix_element.compute_all_analytic_information( 1938 self.get_aloha_model(matrix_element.get('processes')[0].get('model'))) 1939 1940 self.set_group_loops(matrix_element) 1941 1942 # Initialize a general replacement dictionary with entries common to 1943 # many files generated here. 1944 matrix_element.rep_dict = LoopProcessExporterFortranSA.\ 1945 generate_general_replace_dict(self, matrix_element, 1946 group_number = group_number, proc_id = proc_id) 1947 1948 # and those specific to the optimized output 1949 self.set_optimized_output_specific_replace_dict_entries(matrix_element) 1950 1951 # Create the necessary files for the loop matrix element subroutine 1952 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w') 1953 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix']) 1954 proc_prefix_writer.close() 1955 1956 filename = 'loop_matrix.f' 1957 calls = self.write_loopmatrix(writers.FortranWriter(filename), 1958 matrix_element, 1959 OptimizedFortranModel) 1960 1961 filename = 'check_sa.f' 1962 self.write_check_sa(writers.FortranWriter(filename),matrix_element) 1963 1964 filename = 'polynomial.f' 1965 calls = self.write_polynomial_subroutines( 1966 writers.FortranWriter(filename), 1967 matrix_element) 1968 1969 filename = 'improve_ps.f' 1970 calls = self.write_improve_ps(writers.FortranWriter(filename), 1971 matrix_element) 1972 1973 filename = 'CT_interface.f' 1974 self.write_CT_interface(writers.FortranWriter(filename),\ 1975 matrix_element) 1976 1977 filename = 'TIR_interface.f' 1978 self.write_TIR_interface(writers.FortranWriter(filename), 1979 matrix_element) 1980 1981 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']: 1982 filename = 'GOLEM_interface.f' 1983 self.write_GOLEM_interface(writers.FortranWriter(filename), 1984 matrix_element) 1985 1986 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']: 1987 filename = 'COLLIER_interface.f' 1988 self.write_COLLIER_interface(writers.FortranWriter(filename), 1989 matrix_element) 1990 1991 filename = 'loop_num.f' 1992 self.write_loop_num(writers.FortranWriter(filename),\ 1993 
matrix_element,OptimizedFortranModel) 1994 1995 filename = 'mp_compute_loop_coefs.f' 1996 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\ 1997 matrix_element,OptimizedFortranModel) 1998 1999 if self.get_context(matrix_element)['ComputeColorFlows']: 2000 filename = 'compute_color_flows.f' 2001 self.write_compute_color_flows(writers.FortranWriter(filename), 2002 matrix_element, config_map = config_map) 2003 2004 # Extract number of external particles 2005 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2006 filename = 'nexternal.inc' 2007 self.write_nexternal_file(writers.FortranWriter(filename), 2008 nexternal, ninitial) 2009 2010 # Write general process information 2011 filename = 'process_info.inc' 2012 self.write_process_info_file(writers.FortranWriter(filename), 2013 matrix_element) 2014 2015 if self.get_context(matrix_element)['TIRCaching']: 2016 filename = 'tir_cache_size.inc' 2017 self.write_tir_cache_size_include(writers.FortranWriter(filename)) 2018 2019 return calls
2020
2021 - def set_optimized_output_specific_replace_dict_entries(self, matrix_element):
2022 """ Specify the entries of the replacement dictionary which are specific 2023 to the optimized output and only relevant to it (the more general entries 2024 are set in the the mother class LoopProcessExporterFortranSA.""" 2025 2026 max_loop_rank=matrix_element.get_max_loop_rank() 2027 matrix_element.rep_dict['maxrank']=max_loop_rank 2028 matrix_element.rep_dict['loop_max_coefs']=\ 2029 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank) 2030 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank() 2031 matrix_element.rep_dict['vertex_max_coefs']=\ 2032 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank) 2033 2034 matrix_element.rep_dict['nloopwavefuncs']=\ 2035 matrix_element.get_number_of_loop_wavefunctions() 2036 max_spin=matrix_element.get_max_loop_particle_spin() 2037 2038 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16 2039 matrix_element.rep_dict['nloops']=len(\ 2040 [1 for ldiag in matrix_element.get_loop_diagrams() for \ 2041 lamp in ldiag.get_loop_amplitudes()]) 2042 2043 if self.set_group_loops(matrix_element): 2044 matrix_element.rep_dict['nloop_groups']=\ 2045 len(matrix_element.get('loop_groups')) 2046 else: 2047 matrix_element.rep_dict['nloop_groups']=\ 2048 matrix_element.rep_dict['nloops']
2049
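# Hedged cross-check of the coefficient counting used above: if
# q_polynomial.get_number_of_coefs_for_rank(r) counts the independent symmetric
# tensor coefficients of all ranks up to r in 4 dimensions (an assumption about that
# helper, stated here only for orientation), it agrees with the standard combinatorial
# result binomial(r+4, 4):
def n_coefs_up_to_rank(r, dim=4):
    """Count the monomials q^{mu_1}...q^{mu_k} with k = 0..r in `dim` dimensions."""
    from math import factorial
    return factorial(r + dim) // (factorial(r) * factorial(dim))
# n_coefs_up_to_rank(1) == 5, n_coefs_up_to_rank(2) == 15, n_coefs_up_to_rank(4) == 70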
2050 - def write_loop_num(self, writer, matrix_element,fortran_model):
2051 """ Create the file containing the core subroutine called by CutTools 2052 which contains the Helas calls building the loop""" 2053 2054 replace_dict=copy.copy(matrix_element.rep_dict) 2055 2056 file = open(os.path.join(self.template_dir,'loop_num.inc')).read() 2057 file = file % replace_dict 2058 writer.writelines(file,context=self.get_context(matrix_element))
2059
2060 - def write_CT_interface(self, writer, matrix_element):
2061 """ We can re-use the mother one for the loop optimized output.""" 2062 LoopProcessExporterFortranSA.write_CT_interface(\ 2063 self, writer, matrix_element,optimized_output=True)
2064
2065 - def write_TIR_interface(self, writer, matrix_element):
2066 """ Create the file TIR_interface.f which does NOT contain the subroutine 2067 defining the loop HELAS-like calls along with the general interfacing 2068 subroutine. """ 2069 2070 # First write TIR_interface which interfaces MG5 with TIR. 2071 replace_dict=copy.copy(matrix_element.rep_dict) 2072 2073 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read() 2074 2075 # Check which loops have an Higgs effective vertex so as to correctly 2076 # implement CutTools limitation 2077 loop_groups = matrix_element.get('loop_groups') 2078 has_HEFT_vertex = [False]*len(loop_groups) 2079 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups): 2080 for lamp in loop_amp_list: 2081 final_lwf = lamp.get_final_loop_wavefunction() 2082 while not final_lwf is None: 2083 # We define here an HEFT vertex as any vertex built up from 2084 # only massless vectors and scalars (at least one of each) 2085 scalars = len([1 for wf in final_lwf.get('mothers') if 2086 wf.get('spin')==1]) 2087 vectors = len([1 for wf in final_lwf.get('mothers') if 2088 wf.get('spin')==3 and wf.get('mass')=='ZERO']) 2089 if scalars>=1 and vectors>=1 and \ 2090 scalars+vectors == len(final_lwf.get('mothers')): 2091 has_HEFT_vertex[i] = True 2092 break 2093 final_lwf = final_lwf.get_loop_mother() 2094 else: 2095 continue 2096 break 2097 2098 has_HEFT_list = [] 2099 chunk_size = 9 2100 for k in xrange(0, len(has_HEFT_vertex), chunk_size): 2101 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \ 2102 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)), 2103 ','.join(['.TRUE.' if l else '.FALSE.' for l in 2104 has_HEFT_vertex[k:k + chunk_size]]))) 2105 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list) 2106 2107 file = file % replace_dict 2108 2109 FPR = q_polynomial.FortranPolynomialRoutines( 2110 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\ 2111 sub_prefix=replace_dict['proc_prefix']) 2112 if self.tir_available_dict['pjfry']: 2113 file += '\n\n'+FPR.write_pjfry_mapping() 2114 if self.tir_available_dict['iregi']: 2115 file += '\n\n'+FPR.write_iregi_mapping() 2116 2117 if writer: 2118 writer.writelines(file,context=self.get_context(matrix_element)) 2119 else: 2120 return file
2121
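# Illustrative sketch only: what one chunk emitted by the loop above looks like for a
# hypothetical process with three loop groups, of which the first and last contain a
# HEFT-like vertex (values are made up):
has_heft_example = [True, False, True]
data_statement_example = "DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % (
    1, len(has_heft_example),
    ','.join(['.TRUE.' if l else '.FALSE.' for l in has_heft_example]))
# data_statement_example ==
#   'DATA (HAS_AN_HEFT_VERTEX(I),I=     1,     3) /.TRUE.,.FALSE.,.TRUE./'
# Lists longer than chunk_size (9) are simply spread over several such DATA statements.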
2122 - def write_COLLIER_interface(self, writer, matrix_element):
2123 """ Create the file COLLIER_interface.f""" 2124 2125 # First write GOLEM_interface which interfaces MG5 with TIR. 2126 replace_dict=copy.copy(matrix_element.rep_dict) 2127 2128 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read() 2129 2130 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2131 coef_format=replace_dict['complex_dp_format'],\ 2132 sub_prefix=replace_dict['proc_prefix']) 2133 map_definition = [] 2134 collier_map = FPR.get_COLLIER_mapping() 2135 2136 chunk_size = 10 2137 for map_name, indices_list in \ 2138 [('COEFMAP_ZERO',[c[0] for c in collier_map]), 2139 ('COEFMAP_ONE',[c[1] for c in collier_map]), 2140 ('COEFMAP_TWO',[c[2] for c in collier_map]), 2141 ('COEFMAP_THREE',[c[3] for c in collier_map])]: 2142 for k in xrange(0, len(indices_list), chunk_size): 2143 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \ 2144 (map_name,k, min(k + chunk_size, len(indices_list))-1, 2145 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size]))) 2146 2147 replace_dict['collier_coefmap'] = '\n'.join(map_definition) 2148 2149 file = file % replace_dict 2150 2151 if writer: 2152 writer.writelines(file,context=self.get_context(matrix_element)) 2153 else: 2154 return file
2155
2156 - def write_GOLEM_interface(self, writer, matrix_element):
2157 """ Create the file GOLEM_interface.f which does NOT contain the subroutine 2158 defining the loop HELAS-like calls along with the general interfacing 2159 subroutine. """ 2160 2161 # First write GOLEM_interface which interfaces MG5 with TIR. 2162 replace_dict=copy.copy(matrix_element.rep_dict) 2163 2164 # We finalize TIR result differently wether we used the built-in 2165 # squaring against the born. 2166 if not self.get_context(matrix_element)['AmplitudeReduction']: 2167 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX' 2168 else: 2169 replace_dict['loop_induced_sqsoindex']='' 2170 2171 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read() 2172 2173 file = file % replace_dict 2174 2175 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\ 2176 coef_format=replace_dict['complex_dp_format'],\ 2177 sub_prefix=replace_dict['proc_prefix']) 2178 2179 file += '\n\n'+FPR.write_golem95_mapping() 2180 2181 if writer: 2182 writer.writelines(file,context=self.get_context(matrix_element)) 2183 else: 2184 return file
2185
2186 - def write_polynomial_subroutines(self,writer,matrix_element):
2187 """ Subroutine to create all the subroutines relevant for handling 2188 the polynomials representing the loop numerator """ 2189 2190 # First create 'loop_max_coefs.inc' 2191 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w') 2192 IncWriter.writelines("""INTEGER LOOPMAXCOEFS 2193 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)""" 2194 %matrix_element.rep_dict) 2195 2196 # Then coef_specs directly in DHELAS if it does not exist already 2197 # 'coef_specs.inc'. If several processes exported different files there, 2198 # it is fine because the overall maximum value will overwrite it in the 2199 # end 2200 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc') 2201 if not os.path.isfile(coef_specs_path): 2202 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2203 IncWriter.writelines("""INTEGER MAXLWFSIZE 2204 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2205 INTEGER VERTEXMAXCOEFS 2206 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2207 %matrix_element.rep_dict) 2208 IncWriter.close() 2209 2210 # List of all subroutines to place there 2211 subroutines=[] 2212 2213 # Start from the routine in the template 2214 replace_dict = copy.copy(matrix_element.rep_dict) 2215 2216 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2217 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read() 2218 # The double precision version of the basic polynomial routines, such as 2219 # create_loop_coefs 2220 replace_dict['complex_format'] = replace_dict['complex_dp_format'] 2221 replace_dict['real_format'] = replace_dict['real_dp_format'] 2222 replace_dict['mp_prefix'] = '' 2223 replace_dict['kind'] = 8 2224 replace_dict['zero_def'] = '0.0d0' 2225 replace_dict['one_def'] = '1.0d0' 2226 dp_routine = dp_routine % replace_dict 2227 # The quadruple precision version of the basic polynomial routines 2228 replace_dict['complex_format'] = replace_dict['complex_mp_format'] 2229 replace_dict['real_format'] = replace_dict['real_mp_format'] 2230 replace_dict['mp_prefix'] = 'MP_' 2231 replace_dict['kind'] = 16 2232 replace_dict['zero_def'] = '0.0e0_16' 2233 replace_dict['one_def'] = '1.0e0_16' 2234 mp_routine = mp_routine % replace_dict 2235 subroutines.append(dp_routine) 2236 subroutines.append(mp_routine) 2237 2238 # Initialize the polynomial routine writer 2239 poly_writer=q_polynomial.FortranPolynomialRoutines( 2240 matrix_element.get_max_loop_rank(), 2241 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2242 sub_prefix=replace_dict['proc_prefix'], 2243 proc_prefix=replace_dict['proc_prefix'], 2244 mp_prefix='') 2245 # Write the polynomial constant module common to all 2246 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n') 2247 2248 mp_poly_writer=q_polynomial.FortranPolynomialRoutines( 2249 matrix_element.get_max_loop_rank(), 2250 updater_max_rank = matrix_element.get_max_loop_vertex_rank(), 2251 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'], 2252 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_') 2253 # The eval subroutine 2254 subroutines.append(poly_writer.write_polynomial_evaluator()) 2255 subroutines.append(mp_poly_writer.write_polynomial_evaluator()) 2256 # The add coefs subroutine 2257 subroutines.append(poly_writer.write_add_coefs()) 2258 subroutines.append(mp_poly_writer.write_add_coefs()) 2259 # The merging one for creating the loop coefficients 2260 subroutines.append(poly_writer.write_wl_merger()) 2261 subroutines.append(mp_poly_writer.write_wl_merger()) 2262 for wl_update 
in matrix_element.get_used_wl_updates(): 2263 # We pick here the most appropriate way of computing the 2264 # tensor product depending on the rank of the two tensors. 2265 # The various choices below come out of a careful comparison of 2266 # the different methods using the valgrind profiler 2267 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0: 2268 # If either rank is 0, or if they are both equal to 1, 2269 # then we are better off using the fully expanded polynomial, 2270 # and let the compiler optimize it. 2271 subroutines.append(poly_writer.write_expanded_wl_updater(\ 2272 wl_update[0],wl_update[1])) 2273 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\ 2274 wl_update[0],wl_update[1])) 2275 elif wl_update[0] >= wl_update[1]: 2276 # If the loop polynomial is larger, then we filter and loop 2277 # over the vertex coefficients first. The smallest product for 2278 # which the routines below could be used is then 2279 # loop_rank_2 x vertex_rank_1 2280 subroutines.append(poly_writer.write_compact_wl_updater(\ 2281 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2282 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2283 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True)) 2284 else: 2285 # This happens only when the rank of the updater (vertex coef) 2286 # is larger than the one of the loop coef and none of them is 2287 # zero. This never happens in renormalizable theories but it 2288 # can happen in HEFT or other effective theories. In this 2289 # case the typical use of this routine is for the product 2290 # loop_rank_1 x vertex_rank_2 2291 subroutines.append(poly_writer.write_compact_wl_updater(\ 2292 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2293 subroutines.append(mp_poly_writer.write_compact_wl_updater(\ 2294 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False)) 2295 2296 writer.writelines('\n\n'.join(subroutines), 2297 context=self.get_context(matrix_element))
2298
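# Compact, illustrative restatement of the branching above (sketch only, not used by
# the exporter): which flavour of coefficient-update routine gets written for a given
# (loop rank) x (vertex rank) tensor product.
def updater_kind(loop_rank, vertex_rank):
    if loop_rank == vertex_rank == 1 or 0 in (loop_rank, vertex_rank):
        return 'expanded'  # fully unrolled polynomial, left to the compiler
    elif loop_rank >= vertex_rank:
        return 'compact, looping over vertex coefficients first'
    else:
        return 'compact, looping over loop coefficients first'
# updater_kind(2, 1) == 'compact, looping over vertex coefficients first'
# updater_kind(1, 2) == 'compact, looping over loop coefficients first'   (HEFT-like case)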
2299 - def write_mp_compute_loop_coefs(self, writer, matrix_element, fortran_model):
2300 """Create the write_mp_compute_loop_coefs.f file.""" 2301 2302 if not matrix_element.get('processes') or \ 2303 not matrix_element.get('diagrams'): 2304 return 0 2305 2306 # Set lowercase/uppercase Fortran code 2307 2308 writers.FortranWriter.downcase = False 2309 2310 replace_dict = copy.copy(matrix_element.rep_dict) 2311 2312 # Extract helas calls 2313 squared_orders = matrix_element.get_squared_order_contribs() 2314 split_orders = matrix_element.get('processes')[0].get('split_orders') 2315 2316 born_ct_helas_calls , uvct_helas_calls = \ 2317 fortran_model.get_born_ct_helas_calls(matrix_element, 2318 squared_orders=squared_orders, split_orders=split_orders) 2319 self.turn_to_mp_calls(born_ct_helas_calls) 2320 self.turn_to_mp_calls(uvct_helas_calls) 2321 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2322 matrix_element,group_loops=self.group_loops, 2323 squared_orders=squared_orders,split_orders=split_orders) 2324 # The proc_prefix must be replaced 2325 coef_construction = [c % matrix_element.rep_dict for c 2326 in coef_construction] 2327 self.turn_to_mp_calls(coef_construction) 2328 self.turn_to_mp_calls(coef_merging) 2329 2330 file = open(os.path.join(self.template_dir,\ 2331 'mp_compute_loop_coefs.inc')).read() 2332 2333 # Setup the contextual environment which is used in the splitting 2334 # functions below 2335 context = self.get_context(matrix_element) 2336 file=self.split_HELASCALLS(writer,replace_dict,\ 2337 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\ 2338 'mp_born_ct_helas_calls','mp_helas_calls_ampb', 2339 required_so_broadcaster = 'MP_CT_REQ_SO_DONE', 2340 continue_label = 2000, 2341 momenta_array_name = 'MP_P', 2342 context=context) 2343 file=self.split_HELASCALLS(writer,replace_dict,\ 2344 'mp_helas_calls_split.inc',file,uvct_helas_calls,\ 2345 'mp_uvct_helas_calls','mp_helas_calls_uvct', 2346 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE', 2347 continue_label = 3000, 2348 momenta_array_name = 'MP_P', 2349 context=context) 2350 file=self.split_HELASCALLS(writer,replace_dict,\ 2351 'mp_helas_calls_split.inc',file,coef_construction,\ 2352 'mp_coef_construction','mp_coef_construction', 2353 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE', 2354 continue_label = 4000, 2355 momenta_array_name = 'MP_P', 2356 context=context) 2357 2358 replace_dict['mp_coef_merging']='\n'.join(coef_merging) 2359 2360 file = file % replace_dict 2361 2362 # Write the file 2363 writer.writelines(file,context=context)
2364
2365 - def write_color_matrix_data_file(self, writer, col_matrix):
2366 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding 2367 to the color coefficients for JAMP(L|B)*JAMP(L|B).""" 2368 2369 res = [] 2370 for line in range(len(col_matrix._col_basis1)): 2371 numerators = [] 2372 denominators = [] 2373 for row in range(len(col_matrix._col_basis2)): 2374 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)] 2375 numerators.append('%6r'%coeff[0].numerator) 2376 denominators.append('%6r'%( 2377 coeff[0].denominator*(-1 if coeff[1] else 1))) 2378 res.append(' '.join(numerators)) 2379 res.append(' '.join(denominators)) 2380 2381 res.append('EOF') 2382 2383 writer.writelines('\n'.join(res))
2384
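# Layout sketch for the .dat files written above, using a hypothetical 2x2 colour
# matrix with entries 7/3, 1/6, 1/6, 7/3 and no folded sign flag in coeff[1]
# (purely illustrative values): each row of the first colour basis yields one line
# of numerators followed by one line of denominators, and the file ends with 'EOF'.
import fractions
toy_matrix = {(0, 0): fractions.Fraction(7, 3), (0, 1): fractions.Fraction(1, 6),
              (1, 0): fractions.Fraction(1, 6), (1, 1): fractions.Fraction(7, 3)}
toy_lines = []
for i in range(2):
    toy_lines.append(' '.join('%6r' % toy_matrix[(i, j)].numerator for j in range(2)))
    toy_lines.append(' '.join('%6r' % toy_matrix[(i, j)].denominator for j in range(2)))
toy_lines.append('EOF')
# toy_lines == ['     7      1', '     3      6', '     1      7', '     6      3', 'EOF']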
2385 - def write_color_flow_coefs_data_file(self, writer, color_amplitudes, 2386 color_basis):
2387 """ Writes the file '(Loop|Born)ColorFlowCoefs.dat using the coefficients 2388 list of the color_amplitudes in the argument of this function.""" 2389 2390 my_cs = color.ColorString() 2391 2392 res = [] 2393 2394 for jamp_number, coeff_list in enumerate(color_amplitudes): 2395 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number]) 2396 # Order the ColorString so that its ordering is canonical. 2397 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0] 2398 res.append('%d # Coefficient for flow number %d with expr. %s'\ 2399 %(len(coeff_list), jamp_number+1, repr(ordered_cs))) 2400 # A line element is a tuple (numerator, denominator, amplitude_id) 2401 line_element = [] 2402 2403 for (coefficient, amp_number) in coeff_list: 2404 coef = self.cat_coeff(\ 2405 coefficient[0],coefficient[1],coefficient[2],coefficient[3]) 2406 line_element.append((coef[0].numerator, 2407 coef[0].denominator*(-1 if coef[1] else 1),amp_number)) 2408 # Sort them by growing amplitude number 2409 line_element.sort(key=lambda el:el[2]) 2410 2411 for i in range(3): 2412 res.append(' '.join('%6r'%elem[i] for elem in line_element)) 2413 2414 res.append('EOF') 2415 writer.writelines('\n'.join(res))
2416
2417 - def write_compute_color_flows(self, writer, matrix_element, config_map):
2418 """Writes the file compute_color_flows.f which uses the AMPL results 2419 from a common block to project them onto the color flow space so as 2420 to compute the JAMP quantities. For loop induced processes, this file 2421 will also contain a subroutine computing AMPL**2 for madevent 2422 multichanneling.""" 2423 2424 loop_col_amps = matrix_element.get_loop_color_amplitudes() 2425 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps) 2426 2427 dat_writer = open(pjoin('..','MadLoop5_resources', 2428 '%(proc_prefix)sLoopColorFlowCoefs.dat' 2429 %matrix_element.rep_dict),'w') 2430 self.write_color_flow_coefs_data_file(dat_writer, 2431 loop_col_amps, matrix_element.get('loop_color_basis')) 2432 dat_writer.close() 2433 2434 dat_writer = open(pjoin('..','MadLoop5_resources', 2435 '%(proc_prefix)sLoopColorFlowMatrix.dat' 2436 %matrix_element.rep_dict),'w') 2437 self.write_color_matrix_data_file(dat_writer, 2438 matrix_element.get('color_matrix')) 2439 dat_writer.close() 2440 2441 if matrix_element.get('processes')[0].get('has_born'): 2442 born_col_amps = matrix_element.get_born_color_amplitudes() 2443 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps) 2444 dat_writer = open(pjoin('..','MadLoop5_resources', 2445 '%(proc_prefix)sBornColorFlowCoefs.dat' 2446 %matrix_element.rep_dict),'w') 2447 self.write_color_flow_coefs_data_file(dat_writer, 2448 born_col_amps, matrix_element.get('born_color_basis')) 2449 dat_writer.close() 2450 2451 dat_writer = open(pjoin('..','MadLoop5_resources', 2452 '%(proc_prefix)sBornColorFlowMatrix.dat' 2453 %matrix_element.rep_dict),'w') 2454 self.write_color_matrix_data_file(dat_writer, 2455 color_amp.ColorMatrix(matrix_element.get('born_color_basis'))) 2456 dat_writer.close() 2457 else: 2458 matrix_element.rep_dict['nBornFlows'] = 0 2459 2460 replace_dict = copy.copy(matrix_element.rep_dict) 2461 2462 # The following variables only have to be defined for the LoopInduced 2463 # output for madevent. 2464 if self.get_context(matrix_element)['MadEventOutput']: 2465 self.get_amp2_lines(matrix_element, replace_dict, config_map) 2466 else: 2467 replace_dict['config_map_definition'] = '' 2468 replace_dict['config_index_map_definition'] = '' 2469 replace_dict['nmultichannels'] = 0 2470 replace_dict['nmultichannel_configs'] = 0 2471 2472 # The nmultichannels entry will be used in the matrix<i> wrappers as 2473 # well, so we add it to the general_replace_dict too. 2474 matrix_element.rep_dict['nmultichannels'] = \ 2475 replace_dict['nmultichannels'] 2476 matrix_element.rep_dict['nmultichannel_configs'] = \ 2477 replace_dict['nmultichannel_configs'] 2478 2479 2480 file = open(os.path.join(self.template_dir,\ 2481 'compute_color_flows.inc')).read()%replace_dict 2482 2483 writer.writelines(file,context=self.get_context(matrix_element))
2484
2485 - def write_global_specs(self, matrix_element_list, output_path=None):
2486 """ From the list of matrix element, or the single matrix element, derive 2487 the global quantities to write in global_coef_specs.inc""" 2488 2489 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList, 2490 loop_helas_objects.LoopHelasProcess)): 2491 matrix_element_list = matrix_element_list.get_matrix_elements() 2492 2493 if isinstance(matrix_element_list, list): 2494 me_list = matrix_element_list 2495 else: 2496 me_list = [matrix_element_list] 2497 2498 if output_path is None: 2499 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc') 2500 else: 2501 out_path = output_path 2502 2503 open(out_path,'w').write( 2504 """ integer MAXNEXTERNAL 2505 parameter(MAXNEXTERNAL=%d) 2506 integer OVERALLMAXRANK 2507 parameter(OVERALLMAXRANK=%d) 2508 integer NPROCS 2509 parameter(NPROCS=%d)"""%( 2510 max(me.get_nexternal_ninitial()[0] for me in me_list), 2511 max(me.get_max_loop_rank() for me in me_list), 2512 len(me_list)))
2513 2514
2515 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2516 """ If processes with different maximum loop wavefunction size or 2517 different maximum loop vertex rank have to be output together, then 2518 the file 'coef.inc' in the HELAS Source folder must contain the overall 2519 maximum of these quantities. It is not safe though, and the user has 2520 been appropriatly warned at the output stage """ 2521 2522 # Remove the existing link 2523 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\ 2524 'coef_specs.inc') 2525 os.remove(coef_specs_path) 2526 2527 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16} 2528 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin] 2529 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank( 2530 overall_max_loop_vert_rank) 2531 # Replace it by the appropriate value 2532 IncWriter=writers.FortranWriter(coef_specs_path,'w') 2533 IncWriter.writelines("""INTEGER MAXLWFSIZE 2534 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 2535 INTEGER VERTEXMAXCOEFS 2536 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 2537 %{'max_lwf_size':overall_max_lwf_size, 2538 'vertex_max_coefs':overall_max_loop_vert_coefs}) 2539 IncWriter.close()
2540
2541 - def setup_check_sa_replacement_dictionary(self, matrix_element, \ 2542 split_orders,squared_orders,amps_orders):
2543 """ Sets up the replacement dictionary for the writeout of the steering 2544 file check_sa.f""" 2545 if len(squared_orders)<1: 2546 matrix_element.rep_dict['print_so_loop_results']=\ 2547 "write(*,*) 'No split orders defined.'" 2548 elif len(squared_orders)==1: 2549 matrix_element.rep_dict['set_coupling_target']='' 2550 matrix_element.rep_dict['print_so_loop_results']=\ 2551 "write(*,*) 'All loop contributions are of split orders (%s)'"%( 2552 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \ 2553 for i in range(len(split_orders))])) 2554 else: 2555 matrix_element.rep_dict['set_coupling_target']='\n'.join([ 2556 '# Here we leave the default target squared split order to -1, meaning that we'+ 2557 ' aim at computing all individual contributions. You can choose otherwise.', 2558 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict]) 2559 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([ 2560 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join( 2561 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))), 2562 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1), 2563 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1), 2564 "ELSE", 2565 "write(*,*) ' > accuracy = NA'", 2566 "ENDIF", 2567 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1), 2568 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1), 2569 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1) 2570 ]) for j, so in enumerate(squared_orders)]) 2571 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join( 2572 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+ 2573 ['\n'.join([ 2574 "write (69,*) 'Loop_SO_Results %s'"%(' '.join( 2575 ['%d'%so_value for so_value in so])), 2576 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1), 2577 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1), 2578 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1), 2579 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1), 2580 ]) for j, so in enumerate(squared_orders)]) 2581 2582 # We must reconstruct here the born squared orders. 
2583 squared_born_so_orders = [] 2584 for i, amp_order in enumerate(amps_orders['born_amp_orders']): 2585 for j in range(0,i+1): 2586 key = tuple([ord1 + ord2 for ord1,ord2 in \ 2587 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])]) 2588 if not key in squared_born_so_orders: 2589 squared_born_so_orders.append(key) 2590 if len(squared_born_so_orders)<1: 2591 matrix_element.rep_dict['print_so_born_results'] = '' 2592 elif len(squared_born_so_orders)==1: 2593 matrix_element.rep_dict['print_so_born_results'] = \ 2594 "write(*,*) 'All Born contributions are of split orders (%s)'"%( 2595 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i]) 2596 for i in range(len(split_orders))])) 2597 else: 2598 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([ 2599 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join( 2600 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1) 2601 for j, so in enumerate(squared_born_so_orders)]) 2602 matrix_element.rep_dict['write_so_born_results'] = '\n'.join( 2603 ['\n'.join([ 2604 "write (69,*) 'Born_SO_Results %s'"%(' '.join( 2605 ['%d'%so_value for so_value in so])), 2606 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1), 2607 ]) for j, so in enumerate(squared_born_so_orders)]) 2608 2609 # Add a bottom bar to both print_so_[loop|born]_results 2610 matrix_element.rep_dict['print_so_born_results'] += \ 2611 '\nwrite (*,*) "---------------------------------"' 2612 matrix_element.rep_dict['print_so_loop_results'] += \ 2613 '\nwrite (*,*) "---------------------------------"'
2614
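# Worked example of the pairing above, with hypothetical born amplitude orders for
# split_orders = ['QCD', 'QED'] (values invented for illustration): amplitudes of
# orders (2,0) and (0,2) square and interfere into the orders (4,0), (2,2) and (0,4).
born_amp_orders_example = [((2, 0), [1]), ((0, 2), [2])]   # (orders, amplitude numbers)
squared_example = []
for i, amp_order in enumerate(born_amp_orders_example):
    for j in range(i + 1):
        key = tuple(a + b for a, b in zip(amp_order[0], born_amp_orders_example[j][0]))
        if key not in squared_example:
            squared_example.append(key)
# squared_example == [(4, 0), (2, 2), (0, 4)]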
2615 - def write_tir_cache_size_include(self, writer):
2616 """Write the file 'tir_cache_size.inc' which sets the size of the TIR 2617 cache the the user wishes to employ and the default value for it. 2618 This can have an impact on MadLoop speed when using stability checks 2619 but also impacts in a non-negligible way MadLoop's memory footprint. 2620 It is therefore important that the user can chose its size.""" 2621 2622 # For the standalone optimized output, a size of one is necessary. 2623 # The MadLoop+MadEvent output sets it to 2 because it can gain further 2624 # speed increase with a TIR cache of size 2 due to the structure of the 2625 # calls to MadLoop there. 2626 tir_cach_size = "parameter(TIR_CACHE_SIZE=1)" 2627 writer.writelines(tir_cach_size)
2628
2629 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \ 2630 write_auxiliary_files=True,):
2631 """Create the loop_matrix.f file.""" 2632 2633 if not matrix_element.get('processes') or \ 2634 not matrix_element.get('diagrams'): 2635 return 0 2636 2637 # Set lowercase/uppercase Fortran code 2638 writers.FortranWriter.downcase = False 2639 2640 # Starting off with the treatment of the split_orders since some 2641 # of the information extracted there will come into the 2642 # general_replace_dict. Split orders are abbreviated SO in all the 2643 # keys of the replacement dictionaries. 2644 2645 # Take care of the split_orders 2646 squared_orders, amps_orders = matrix_element.get_split_orders_mapping() 2647 # Creating here a temporary list containing only the information of 2648 # what are the different squared split orders contributing 2649 # (i.e. not using max_contrib_amp_number and max_contrib_ref_amp_number) 2650 sqso_contribs = [sqso[0] for sqso in squared_orders] 2651 split_orders = matrix_element.get('processes')[0].get('split_orders') 2652 # The entries set in the function below are only for check_sa written 2653 # out in write_loop__matrix_element_v4 (it is however placed here because the 2654 # split order information is only available here). 2655 self.setup_check_sa_replacement_dictionary(matrix_element, 2656 split_orders,sqso_contribs,amps_orders) 2657 2658 # Now recast the split order basis for the loop, born and counterterm 2659 # amplitude into one single splitorderbasis. 2660 overall_so_basis = list(set( 2661 [born_so[0] for born_so in amps_orders['born_amp_orders']]+ 2662 [born_so[0] for born_so in amps_orders['loop_amp_orders']])) 2663 # We must re-sort it to make sure it follows an increasing WEIGHT order 2664 order_hierarchy = matrix_element.get('processes')[0]\ 2665 .get('model').get('order_hierarchy') 2666 if set(order_hierarchy.keys()).union(set(split_orders))==\ 2667 set(order_hierarchy.keys()): 2668 overall_so_basis.sort(key= lambda so: 2669 sum([order_hierarchy[split_orders[i]]*order_power for \ 2670 i, order_power in enumerate(so)])) 2671 2672 # Those are additional entries used throughout the different files of 2673 # MadLoop5 2674 matrix_element.rep_dict['split_order_str_list'] = str(split_orders) 2675 matrix_element.rep_dict['nSO'] = len(split_orders) 2676 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs) 2677 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis) 2678 2679 writers.FortranWriter('nsquaredSO.inc').writelines( 2680 """INTEGER NSQUAREDSO 2681 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO']) 2682 2683 replace_dict = copy.copy(matrix_element.rep_dict) 2684 # Build the general array mapping the split orders indices to their 2685 # definition 2686 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\ 2687 overall_so_basis,'AMPSPLITORDERS')) 2688 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\ 2689 sqso_contribs,'SQPLITORDERS')) 2690 2691 # Specify what are the squared split orders selected by the proc def. 2692 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index( 2693 matrix_element.get('processes')[0],sqso_contribs) 2694 2695 # Now we build the different arrays storing the split_orders ID of each 2696 # amp. 
2697 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders']) 2698 for SO in amps_orders['loop_amp_orders']: 2699 for amp_number in SO[1]: 2700 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2701 2702 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list( 2703 ampSO_list,'LOOPAMPORDERS')) 2704 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders']) 2705 for SO in amps_orders['born_amp_orders']: 2706 for amp_number in SO[1]: 2707 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1 2708 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list( 2709 ampSO_list,'BORNAMPORDERS')) 2710 2711 # We then go to the TIR setup 2712 # The first entry is the CutTools, we make sure it is available 2713 looplibs_av=['.TRUE.'] 2714 # one should be careful about the order in the following as it must match 2715 # the ordering in MadLoopParamsCard. 2716 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']: 2717 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \ 2718 self.tir_available_dict[tir_lib] else '.FALSE.') 2719 replace_dict['data_looplibs_av']=','.join(looplibs_av) 2720 2721 # Helicity offset convention 2722 # For a given helicity, the attached integer 'i' means 2723 # 'i' in ]-inf;-HELOFFSET[ -> Helicity is equal, up to a sign, 2724 # to helicity number abs(i+HELOFFSET) 2725 # 'i' == -HELOFFSET -> Helicity is analytically zero 2726 # 'i' in ]-HELOFFSET,inf[ -> Helicity is contributing with weight 'i'. 2727 # If it is zero, it is skipped. 2728 # Typically, the hel_offset is 10000 2729 replace_dict['hel_offset'] = 10000 2730 2731 # Extract overall denominator 2732 # Averaging initial state color, spin, and identical FS particles 2733 den_factor_line = self.get_den_factor_line(matrix_element) 2734 replace_dict['den_factor_line'] = den_factor_line 2735 2736 # When the user asks for the polarized matrix element we must 2737 # multiply back by the helicity averaging factor 2738 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor() 2739 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\ 2740 matrix_element.get_beams_hel_avg_factor() 2741 2742 if write_auxiliary_files: 2743 # Write out the color matrix 2744 (CMNum,CMDenom) = self.get_color_matrix(matrix_element) 2745 CMWriter=open(pjoin('..','MadLoop5_resources', 2746 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w') 2747 for ColorLine in CMNum: 2748 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2749 CMWriter.close() 2750 CMWriter=open(pjoin('..','MadLoop5_resources', 2751 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w') 2752 for ColorLine in CMDenom: 2753 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n') 2754 CMWriter.close() 2755 2756 # Write out the helicity configurations 2757 HelConfigs=matrix_element.get_helicity_matrix() 2758 HelConfigWriter=open(pjoin('..','MadLoop5_resources', 2759 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w') 2760 for HelConfig in HelConfigs: 2761 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n') 2762 HelConfigWriter.close() 2763 2764 # Extract helas calls 2765 born_ct_helas_calls, uvct_helas_calls = \ 2766 fortran_model.get_born_ct_helas_calls(matrix_element, 2767 squared_orders=squared_orders,split_orders=split_orders) 2768 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\ 2769 matrix_element,group_loops=self.group_loops, 2770 squared_orders=squared_orders,split_orders=split_orders) 
2771 2772 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\ 2773 group_loops=self.group_loops, 2774 squared_orders=squared_orders, split_orders=split_orders) 2775 # The proc_prefix must be replaced 2776 coef_construction = [c % matrix_element.rep_dict for c 2777 in coef_construction] 2778 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls] 2779 2780 file = open(os.path.join(self.template_dir,\ 2781 'loop_matrix_standalone.inc')).read() 2782 2783 # Setup the contextual environment which is used in the splitting 2784 # functions below 2785 context = self.get_context(matrix_element) 2786 file=self.split_HELASCALLS(writer,replace_dict,\ 2787 'helas_calls_split.inc',file,born_ct_helas_calls,\ 2788 'born_ct_helas_calls','helas_calls_ampb', 2789 required_so_broadcaster = 'CT_REQ_SO_DONE', 2790 continue_label = 2000, context = context) 2791 file=self.split_HELASCALLS(writer,replace_dict,\ 2792 'helas_calls_split.inc',file,uvct_helas_calls,\ 2793 'uvct_helas_calls','helas_calls_uvct', 2794 required_so_broadcaster = 'UVCT_REQ_SO_DONE', 2795 continue_label = 3000, context=context) 2796 file=self.split_HELASCALLS(writer,replace_dict,\ 2797 'helas_calls_split.inc',file,coef_construction,\ 2798 'coef_construction','coef_construction', 2799 required_so_broadcaster = 'LOOP_REQ_SO_DONE', 2800 continue_label = 4000, context=context) 2801 file=self.split_HELASCALLS(writer,replace_dict,\ 2802 'helas_calls_split.inc',file,loop_CT_calls,\ 2803 'loop_CT_calls','loop_CT_calls', 2804 required_so_broadcaster = 'CTCALL_REQ_SO_DONE', 2805 continue_label = 5000, context=context) 2806 2807 # Add the entries above to the general_replace_dict so that it can be 2808 # used by write_mp_compute_loop_coefs later 2809 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls'] 2810 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls'] 2811 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls'] 2812 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction'] 2813 2814 replace_dict['coef_merging']='\n'.join(coef_merging) 2815 2816 file = file % replace_dict 2817 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \ 2818 loop_CT_calls)) 2819 if writer: 2820 # Write the file 2821 writer.writelines(file,context=context) 2822 return number_of_calls 2823 else: 2824 # Return it to be written along with the others 2825 return number_of_calls, file
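# Illustrative decoder (sketch only) for the helicity filter convention documented in
# write_loopmatrix above; HELOFFSET is assumed to be 10000 as set there.
def decode_hel_filter_entry(i, hel_offset=10000):
    if i < -hel_offset:
        return 'equal, up to a sign, to helicity %d' % abs(i + hel_offset)
    elif i == -hel_offset:
        return 'analytically zero'
    elif i == 0:
        return 'skipped'
    else:
        return 'contributing with weight %d' % i
# decode_hel_filter_entry(-10003) == 'equal, up to a sign, to helicity 3'
# decode_hel_filter_entry(-10000) == 'analytically zero'
# decode_hel_filter_entry(2)      == 'contributing with weight 2'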
2826 2827 #=============================================================================== 2828 # LoopProcessExporterFortranMatchBox 2829 #===============================================================================
2830 -class LoopProcessExporterFortranMatchBox(LoopProcessOptimizedExporterFortranSA, 2831 export_v4.ProcessExporterFortranMatchBox):
2832 """Class to take care of exporting a set of loop matrix elements in the 2833 Fortran format.""" 2834 2835 default_opt = {'clean': False, 'complex_mass':False, 2836 'export_format':'madloop_matchbox', 'mp':True, 2837 'loop_dir':'', 'cuttools_dir':'', 2838 'fortran_compiler':'gfortran', 2839 'output_dependencies':'external', 2840 'sa_symmetry':True} 2841 2842 2843
2844 - def get_color_string_lines(self, matrix_element):
2845 """Return the color matrix definition lines for this matrix element. Split 2846 rows in chunks of size n.""" 2847 2848 return export_v4.ProcessExporterFortranMatchBox.get_color_string_lines(matrix_element)
2849 2850
2851 - def get_JAMP_lines(self, *args, **opts):
2852 """Adding leading color part of the colorflow""" 2853 2854 return export_v4.ProcessExporterFortranMatchBox.get_JAMP_lines(self, *args, **opts)
2855
2856 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
2857 """ To not mix notations between borns and virtuals we call it here also MG5 """ 2858 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
2859 2860 2861 #=============================================================================== 2862 # LoopInducedExporter 2863 #===============================================================================
2864 -class LoopInducedExporterME(LoopProcessOptimizedExporterFortranSA):
2865 """ A class to specify all the functions common to LoopInducedExporterMEGroup 2866 and LoopInducedExporterMENoGroup (but not relevant for the original 2867 Madevent exporters)""" 2868 2869 madloop_makefile_name = 'makefile_MadLoop' 2870 2871
2872 - def __init__(self, *args, **opts):
2873 """ Initialize the process, setting the proc characteristics.""" 2874 super(LoopInducedExporterME, self).__init__(*args, **opts) 2875 self.proc_characteristic['loop_induced'] = True
2876
2877 - def get_context(self,*args,**opts):
2878 """ Make sure that the contextual variable MadEventOutput is set to 2879 True for this exporter""" 2880 2881 context = super(LoopInducedExporterME,self).get_context(*args,**opts) 2882 context['MadEventOutput'] = True 2883 return context
2884 2885
2886 - def get_source_libraries_list(self):
        """Return the list of libraries to be compiled when building the
        SOURCE directory. It is different for loop_induced processes and
        also depends on the value of the 'output_dependencies' option."""

        libraries_list = super(LoopInducedExporterME,self).\
                                                     get_source_libraries_list()

        if self.dependencies=='internal':
            libraries_list.append('$(LIBDIR)libcts.$(libext)')
            libraries_list.append('$(LIBDIR)libiregi.$(libext)')

        return libraries_list
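    # --------------------------------------------------------------------------
    # [Editor's note] Illustration of the 'output_dependencies' logic above: when
    # the dependencies are built internally, the locally compiled CutTools
    # (libcts) and IREGI (libiregi) libraries must be added to whatever the
    # parent exporter already lists. The sketch is kept as a comment so as not
    # to alter the class body; 'base_list' is a made-up stand-in for the
    # parent's return value.
    #
    #     base_list = ['$(LIBDIR)libdhelas.$(libext)', '$(LIBDIR)libmodel.$(libext)']
    #     if output_dependencies == 'internal':
    #         base_list.append('$(LIBDIR)libcts.$(libext)')
    #         base_list.append('$(LIBDIR)libiregi.$(libext)')
    # --------------------------------------------------------------------------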
2899 2906
2907 - def copy_template(self, *args, **opts):
2908 """Pick the right mother functions 2909 """ 2910 # Call specifically the necessary building functions for the mixed 2911 # template setup for both MadEvent and MadLoop standalone 2912 LoopProcessExporterFortranSA.loop_additional_template_setup(self, 2913 copy_Source_makefile=False) 2914 2915 LoopProcessOptimizedExporterFortranSA.\ 2916 loop_optimized_additional_template_setup(self)
2917 2918 2919 #=========================================================================== 2920 # Create jpeg diagrams, html pages,proc_card_mg5.dat and madevent.tar.gz 2921 #===========================================================================
2922 - def finalize(self, matrix_elements, history, mg5options, flaglist):
2923 """Function to finalize v4 directory, for inheritance. 2924 """ 2925 2926 self.proc_characteristic['loop_induced'] = True 2927 2928 # This can be uncommented if one desires to have the MadLoop 2929 # initialization performed at the end of the output phase. 2930 # Alternatively, one can simply execute the command 'initMadLoop' in 2931 # the madevent interactive interface after the output. 2932 # from madgraph.interface.madevent_interface import MadLoopInitializer 2933 # MadLoopInitializer.init_MadLoop(self.dir_path, 2934 # subproc_prefix=self.SubProc_prefix, MG_options=None) 2935 2936 self.write_global_specs(matrix_elements)
2937
2938 - def write_tir_cache_size_include(self, writer):
        """Write the file 'tir_cache_size.inc' which sets the size of the TIR
        cache that the user wishes to employ, together with its default value.
        This can have an impact on MadLoop speed when using stability checks,
        but it also impacts MadLoop's memory footprint in a non-negligible way.
        It is therefore important that the user can choose its size."""

        # In this case of MadLoop+MadEvent output, we set it to 2 because we
        # gain a further speed increase with a TIR cache of size 2, due to
        # the fact that we call MadLoop once per helicity configuration in
        # this case.
        tir_cache_size = "parameter(TIR_CACHE_SIZE=2)"
        writer.writelines(tir_cache_size)
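    # --------------------------------------------------------------------------
    # [Editor's note] What write_tir_cache_size_include() above boils down to: a
    # single Fortran parameter line placed in 'tir_cache_size.inc'. The commented
    # sketch below writes the same line with a plain file object instead of the
    # FortranWriter passed in as 'writer' (illustrative only).
    #
    #     tir_cache_size = 2
    #     with open('tir_cache_size.inc', 'w') as inc:
    #         inc.write("parameter(TIR_CACHE_SIZE=%d)\n" % tir_cache_size)
    # --------------------------------------------------------------------------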
2951
2952 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model, 2953 proc_id = None, config_map = [], subproc_number = None):
        """Write the wrapper that calls the ML5 subroutine in the library."""

        # Generating the MadEvent wrapper of the ME's routines
        if not matrix_element.get('processes') or \
               not matrix_element.get('diagrams'):
            return 0

        if not isinstance(writer, writers.FortranWriter):
            raise writers.FortranWriter.FortranWriterError(\
                "writer not FortranWriter")

        replace_dict = copy.copy(matrix_element.rep_dict)

        # Extract version number and date from VERSION file
        info_lines = self.get_mg5_info_lines()
        replace_dict['info_lines'] = info_lines

        # Extract process info lines
        process_lines = self.get_process_info_lines(matrix_element)
        replace_dict['process_lines'] = process_lines

        # Set proc_id
        # It can be set to None when write_matrix_element_v4 is called without
        # grouping. In this case the subroutine SMATRIX should take an empty
        # suffix.
        if proc_id is None:
            replace_dict['proc_id'] = ''
        else:
            replace_dict['proc_id'] = proc_id

        # Set the average over the number of initial helicities
        replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
        replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
                                       matrix_element.get_beams_hel_avg_factor()

        # Extract helicity lines
        helicity_lines = self.get_helicity_lines(matrix_element)
        replace_dict['helicity_lines'] = helicity_lines

        # Extract ndiags
        ndiags = len(matrix_element.get('diagrams'))
        replace_dict['ndiags'] = ndiags

        # Set define_iconfigs_lines
        replace_dict['define_iconfigs_lines'] = \
            """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG
            COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG"""

        if proc_id:
            # Set lines for the subprocess group version
            # Set define_iconfigs_lines
            replace_dict['define_iconfigs_lines'] += \
                """\nINTEGER SUBDIAG(MAXSPROC),IB(2)
                COMMON/TO_SUB_DIAG/SUBDIAG,IB"""
            # Set set_amp2_line
            replace_dict['configID_in_matrix'] = "SUBDIAG(%s)" % proc_id
        else:
            # Standard running
            # Set set_amp2_line
            replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)"

        # Set the MadLoop prefix from the group and process numbers
        replace_dict['ml_prefix'] = \
                 self.get_ME_identifier(matrix_element, subproc_number, proc_id)

        # Extract ncolor
        ncolor = max(1, len(matrix_element.get('color_basis')))
        replace_dict['ncolor'] = ncolor

        n_tot_diags = len(matrix_element.get_loop_diagrams())
        replace_dict['n_tot_diags'] = n_tot_diags

        file = open(pjoin(_file_path, \
                     'iolibs/template_files/%s' % self.matrix_file)).read()
        file = file % replace_dict

        # Write the file
        writer.writelines(file)

        return 0, ncolor
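    # --------------------------------------------------------------------------
    # [Editor's note] Summary of the proc_id handling above: without grouping,
    # proc_id is None, the generated SMATRIX routine carries no suffix and the
    # channel is read from MAPCONFIG(ICONFIG); with grouping, proc_id numbers
    # the matrix element within the group and the channel comes from SUBDIAG.
    # A commented sketch (variable names are editorial):
    #
    #     proc_id = '2'                                  # as set by the grouped exporter
    #     suffix = '' if proc_id is None else proc_id
    #     smatrix_name = 'SMATRIX%s' % suffix            # -> 'SMATRIX2'
    #     config_expr = ('SUBDIAG(%s)' % proc_id) if proc_id \
    #                   else 'MAPCONFIG(ICONFIG)'
    # --------------------------------------------------------------------------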
3035
3036 - def get_amp2_lines(self, *args, **opts):
        """Make sure the function is implemented in the daughter classes"""

        raise NotImplementedError('The function get_amp2_lines must be called in '+\
                                  ' the daughters of LoopInducedExporterME')
3041 3042 #=============================================================================== 3043 # LoopInducedExporterMEGroup 3044 #===============================================================================
3045 -class LoopInducedExporterMEGroup(LoopInducedExporterME, 3046 export_v4.ProcessExporterFortranMEGroup):
3047 """Class to take care of exporting a set of grouped loop induced matrix 3048 elements""" 3049 3050 matrix_file = "matrix_loop_induced_madevent_group.inc" 3051 3057
3058 - def write_source_makefile(self, *args, **opts):
3059 """Pick the correct write_source_makefile function from 3060 ProcessExporterFortranMEGroup""" 3061 3062 export_v4.ProcessExporterFortranMEGroup.write_source_makefile(self, 3063 *args, **opts)
3064
3065 - def copy_template(self, *args, **opts):
        """Pick the right mother functions"""
        # Explicitly call the necessary building functions for the mixed
        # template setup covering both MadEvent and MadLoop standalone

        # Start with the MadEvent one
        export_v4.ProcessExporterFortranMEGroup.copy_template(self,*args,**opts)

        # Then the MadLoop-standalone related one
        LoopInducedExporterME.copy_template(self, *args, **opts)
3076
3077 - def finalize(self, *args, **opts):
        """Pick the right mother functions"""
        # Explicitly specify which finalize must be used, so that the
        # MRO doesn't interfere.

        self.proc_characteristic['loop_induced'] = True

        export_v4.ProcessExporterFortranMEGroup.finalize(self,*args,**opts)

        # And the finalize from LoopInducedExporterME, which essentially takes
        # care of the MadLoop virtuals initialization
        LoopInducedExporterME.finalize(self,*args,**opts)
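    # --------------------------------------------------------------------------
    # [Editor's note] Why the two explicit parent-class calls above instead of
    # super(): with multiple inheritance, cooperative super() follows the MRO
    # and could skip or reorder the two finalize implementations, whereas naming
    # each parent runs exactly the two wanted, in the wanted order. A minimal,
    # generic illustration (toy classes, not part of this module):
    #
    #     class A(object):
    #         def finalize(self): return 'A'
    #     class B(A):
    #         def finalize(self): return 'B'
    #     class C(A):
    #         def finalize(self): return 'C'
    #     class D(B, C):
    #         def finalize(self):
    #             # call each chosen parent explicitly, bypassing the MRO
    #             return B.finalize(self) + C.finalize(self)   # -> 'BC'
    # --------------------------------------------------------------------------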
3090
3091 - def generate_subprocess_directory(self, subproc_group, 3092 fortran_model,group_number):
3093 """Generate the Pn directory for a subprocess group in MadEvent, 3094 including the necessary matrix_N.f files, configs.inc and various 3095 other helper files""" 3096 3097 # Generate the MadLoop files 3098 calls = 0 3099 matrix_elements = subproc_group.get('matrix_elements') 3100 for ime, matrix_element in enumerate(matrix_elements): 3101 self.unique_id +=1 3102 calls += self.generate_loop_subprocess(matrix_element,fortran_model, 3103 group_number = group_number, proc_id = str(ime+1), 3104 # group_number = str(subproc_group.get('number')), proc_id = str(ime+1), 3105 config_map = subproc_group.get('diagram_maps')[ime], 3106 unique_id=self.unique_id) 3107 3108 # Then generate the MadEvent files 3109 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory( 3110 self, subproc_group,fortran_model,group_number) 3111 3112 return calls
3113
3114 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3115 """Return the various replacement dictionary inputs necessary for the 3116 multichanneling amp2 definition for the loop-induced MadEvent output. 3117 """ 3118 3119 if not config_map: 3120 raise MadGraph5Error, 'A multi-channeling configuration map is '+\ 3121 ' necessary for the MadEvent Loop-induced output with grouping.' 3122 3123 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3124 3125 ret_lines = [] 3126 # In this case, we need to sum up all amplitudes that have 3127 # identical topologies, as given by the config_map (which 3128 # gives the topology/config for each of the diagrams 3129 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement): 3130 diagrams = matrix_element.get_loop_diagrams() 3131 else: 3132 diagrams = matrix_element.get('diagrams') 3133 3134 # Note that we need to use AMP2 number corresponding to the first 3135 # diagram number used for that AMP2. 3136 # The dictionary below maps the config ID to this corresponding first 3137 # diagram number 3138 config_index_map = {} 3139 # For each diagram number, the dictionary below gives the config_id it 3140 # belongs to or 0 if it doesn't belong to any. 3141 loop_amp_ID_to_config = {} 3142 3143 # Combine the diagrams with identical topologies 3144 config_to_diag_dict = {} 3145 for idiag, diag in enumerate(diagrams): 3146 try: 3147 config_to_diag_dict[config_map[idiag]].append(idiag) 3148 except KeyError: 3149 config_to_diag_dict[config_map[idiag]] = [idiag] 3150 3151 for config in sorted(config_to_diag_dict.keys()): 3152 config_index_map[config] = (config_to_diag_dict[config][0] + 1) 3153 3154 # First add the UV and R2 counterterm amplitudes of each selected 3155 # diagram for the multichannel config 3156 CT_amp_numbers = [a.get('number') for a in \ 3157 sum([diagrams[idiag].get_ct_amplitudes() for \ 3158 idiag in config_to_diag_dict[config]], [])] 3159 3160 for CT_amp_number in CT_amp_numbers: 3161 loop_amp_ID_to_config[CT_amp_number] = config 3162 3163 # Now add here the loop amplitudes. 3164 loop_amp_numbers = [a.get('amplitudes')[0].get('number') 3165 for a in sum([diagrams[idiag].get_loop_amplitudes() for \ 3166 idiag in config_to_diag_dict[config]], [])] 3167 3168 for loop_amp_number in loop_amp_numbers: 3169 loop_amp_ID_to_config[loop_amp_number] = config 3170 3171 # Notice that the config_id's are not necessarily sequential here, so 3172 # the size of the config_index_map array has to be the maximum over all 3173 # config_ids. 3174 # config_index_map should never be empty unless there was no diagram, 3175 # so the expression below is ok. 3176 n_configs = max(config_index_map.keys()) 3177 replace_dict['nmultichannel_configs'] = n_configs 3178 3179 # We must fill the empty entries of the map with the dummy amplitude 3180 # number 0. 3181 conf_list = [(config_index_map[i] if i in config_index_map else 0) \ 3182 for i in range(1,n_configs+1)] 3183 # Now the placeholder 'nmultichannels' refers to the number of 3184 # multi-channels which are contributing, so we must filter out zeros. 
3185 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0]) 3186 3187 # Now write the amp2 related inputs in the replacement dictionary 3188 res_list = [] 3189 chunk_size = 6 3190 for k in xrange(0, len(conf_list), chunk_size): 3191 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3192 (k + 1, min(k + chunk_size, len(conf_list)), 3193 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3194 3195 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3196 3197 res_list = [] 3198 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3199 amp_list = [loop_amp_ID_to_config[i] for i in \ 3200 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3201 chunk_size = 6 3202 for k in xrange(0, len(amp_list), chunk_size): 3203 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3204 (k + 1, min(k + chunk_size, len(amp_list)), 3205 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3206 3207 replace_dict['config_map_definition'] = '\n'.join(res_list) 3208 3209 return
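# ------------------------------------------------------------------------------
# [Editor's note] The two loops above turn flat integer lists into Fortran DATA
# statements of at most six entries each ('config_index_map' and 'CONFIG_MAP').
# The stand-alone helper below re-implements just that chunking step so it can
# be read (or tested) in isolation; the function name is editorial and does not
# exist in the module.
def _sketch_fortran_data_chunks(values, array_name='CONFIG_MAP', chunk_size=6):
    """Illustrative only: format 'values' as Fortran DATA statements, at most
    'chunk_size' entries per statement, mirroring the loops above."""
    lines = []
    for k in range(0, len(values), chunk_size):
        chunk = values[k:k + chunk_size]
        lines.append("DATA (%s(i),i=%6r,%6r) /%s/" % (
            array_name, k + 1, min(k + chunk_size, len(values)),
            ','.join(["%6r" % v for v in chunk])))
    return lines
# Example: _sketch_fortran_data_chunks([1, 1, 2, 0, 3, 3, 3, 4]) yields two
# DATA lines covering entries 1-6 and 7-8.
# ------------------------------------------------------------------------------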
3210 3211 #=============================================================================== 3212 # LoopInducedExporterMENoGroup 3213 #===============================================================================
3214 -class LoopInducedExporterMENoGroup(LoopInducedExporterME, 3215 export_v4.ProcessExporterFortranME):
3216 """Class to take care of exporting a set of individual loop induced matrix 3217 elements""" 3218 3219 matrix_file = "matrix_loop_induced_madevent.inc" 3220 3226
3227 - def write_source_makefile(self, *args, **opts):
3228 """Pick the correct write_source_makefile function from 3229 ProcessExporterFortran""" 3230 3231 super(export_v4.ProcessExporterFortranME,self).\ 3232 write_source_makefile(*args, **opts)
3233
3234 - def copy_template(self, *args, **opts):
        """Pick the right mother functions"""
        # Explicitly call the necessary building functions for the mixed
        # template setup covering both MadEvent and MadLoop standalone

        # Start with the MadEvent one
        export_v4.ProcessExporterFortranME.copy_template(self,*args,**opts)

        # Then the MadLoop-standalone related one
        LoopInducedExporterME.copy_template(self, *args, **opts)
3245
3246 - def finalize(self, *args, **opts):
        """Pick the right mother functions"""

        self.proc_characteristic['loop_induced'] = True
        # Explicitly specify which finalize must be used, so that the
        # MRO doesn't interfere.
        export_v4.ProcessExporterFortranME.finalize(self, *args, **opts)

        # And the finalize from LoopInducedExporterME, which essentially takes
        # care of the MadLoop virtuals initialization
        LoopInducedExporterME.finalize(self, *args, **opts)
3258
3259 - def generate_subprocess_directory(self, matrix_element, fortran_model, me_number):
        """Generate the Pn directory for a subprocess in MadEvent,
        including the necessary matrix_N.f files, configs.inc and various
        other helper files."""

        self.unique_id += 1
        # First generate the MadLoop files
        calls = self.generate_loop_subprocess(matrix_element,fortran_model,
                                              group_number = me_number,
                                              unique_id=self.unique_id)

        # Then generate the MadEvent files
        calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory(
                                  self, matrix_element, fortran_model, me_number)
        return calls
3275
3276 - def get_amp2_lines(self, matrix_element, replace_dict, config_map):
3277 """Return the amp2(i) = sum(amp for diag(i))^2 lines""" 3278 3279 if config_map: 3280 raise MadGraph5Error, 'A configuration map should not be specified'+\ 3281 ' for the Loop induced exporter without grouping.' 3282 3283 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 3284 # Get minimum legs in a vertex 3285 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \ 3286 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]] 3287 minvert = min(vert_list) if vert_list!=[] else 0 3288 3289 # Note that we need to use AMP2 number corresponding to the first 3290 # diagram number used for that AMP2. 3291 # The dictionary below maps the config ID to this corresponding first 3292 # diagram number 3293 config_index_map = {} 3294 # For each diagram number, the dictionary below gives the config_id it 3295 # belongs to or 0 if it doesn't belong to any. 3296 loop_amp_ID_to_config = {} 3297 3298 n_configs = 0 3299 for idiag, diag in enumerate(matrix_element.get('diagrams')): 3300 # Ignore any diagrams with 4-particle vertices. 3301 use_for_multichanneling = True 3302 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert: 3303 use_for_multichanneling = False 3304 curr_config = 0 3305 else: 3306 n_configs += 1 3307 curr_config = n_configs 3308 3309 if not use_for_multichanneling: 3310 if 0 not in config_index_map: 3311 config_index_map[0] = idiag + 1 3312 else: 3313 config_index_map[curr_config] = idiag + 1 3314 3315 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()] 3316 for CT_amp in CT_amps: 3317 loop_amp_ID_to_config[CT_amp] = curr_config 3318 3319 Loop_amps = [a.get('amplitudes')[0].get('number') 3320 for a in diag.get_loop_amplitudes()] 3321 for Loop_amp in Loop_amps: 3322 loop_amp_ID_to_config[Loop_amp] = curr_config 3323 3324 # Now write the amp2 related inputs in the replacement dictionary 3325 n_configs = len([k for k in config_index_map.keys() if k!=0]) 3326 replace_dict['nmultichannel_configs'] = n_configs 3327 # Now the placeholder 'nmultichannels' refers to the number of 3328 # multi-channels which are contributing which, in the non-grouped case 3329 # is always equal to the total number of multi-channels. 3330 replace_dict['nmultichannels'] = n_configs 3331 3332 res_list = [] 3333 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys()) 3334 if i!=0] 3335 chunk_size = 6 3336 for k in xrange(0, len(conf_list), chunk_size): 3337 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \ 3338 (k + 1, min(k + chunk_size, len(conf_list)), 3339 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]]))) 3340 3341 replace_dict['config_index_map_definition'] = '\n'.join(res_list) 3342 3343 res_list = [] 3344 n_loop_amps = max(loop_amp_ID_to_config.keys()) 3345 amp_list = [loop_amp_ID_to_config[i] for i in \ 3346 sorted(loop_amp_ID_to_config.keys()) if i!=0] 3347 chunk_size = 6 3348 for k in xrange(0, len(amp_list), chunk_size): 3349 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \ 3350 (k + 1, min(k + chunk_size, len(amp_list)), 3351 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]]))) 3352 3353 replace_dict['config_map_definition'] = '\n'.join(res_list)
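# ------------------------------------------------------------------------------
# [Editor's note] The selection rule used in get_amp2_lines() above: compute the
# smallest 'largest vertex' over all diagrams (minvert) and drop from the
# multi-channeling any diagram containing a bigger vertex (e.g. diagrams with a
# 4-point vertex when purely 3-point diagrams exist). The helper below mirrors
# that rule on plain lists of vertex sizes; its name is editorial and it is not
# part of the module.
def _sketch_multichannel_diagram_selection(diagram_vertex_sizes):
    """Illustrative only: return the (1-based) numbers of the diagrams that are
    kept for multi-channeling, following the minvert rule used above."""
    vert_list = [max(sizes) for sizes in diagram_vertex_sizes if sizes != []]
    minvert = min(vert_list) if vert_list != [] else 0
    kept = []
    for idiag, sizes in enumerate(diagram_vertex_sizes):
        if sizes != [] and max(sizes) > minvert:
            continue                      # excluded from multi-channeling
        kept.append(idiag + 1)            # diagram numbers are 1-based above
    return kept
# Example: _sketch_multichannel_diagram_selection([[3, 3], [3, 4], [3, 3, 3]])
# returns [1, 3] since the second diagram contains a 4-point vertex.
# ------------------------------------------------------------------------------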
3354